diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 3e10d9b882fd..0c94c6cd80fa 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -10,7 +10,6 @@ ath9k-y += beacon.o \ channel.o ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o -// focus: wiptp ath9k-$(CONFIG_ATH9K_PCI) += pci.o ptp.o ath9k-$(CONFIG_ATH9K_AHB) += ahb.o ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c index d0f1e8bcc846..fbf7a1293951 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c @@ -248,6 +248,7 @@ static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count) (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) | ATH_AIC_BT_AIC_ENABLE)); + atomic64_inc(&ah->ptp_tsf_aic_read_cnt); aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32); /* Start calibration */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index c7e9c64826ca..aa806f9e501d 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -24,16 +24,20 @@ #include #include #include +#include +#include +#include +#include -// focus: wiptp #include #include +#include #include -// focus: wiptp #include #include +#include #include #include "common.h" @@ -118,8 +122,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, /* minimum h/w qdepth to be sustained to maximize aggregation */ #define ATH_AGGR_MIN_QDEPTH 2 /* minimum h/w qdepth for non-aggregated traffic */ -// focus: wiptp -#define ATH_NON_AGGR_MIN_QDEPTH 32 // FIXME: wiptp -> 32 / default -> 8 +#define ATH_NON_AGGR_MIN_QDEPTH 32 #define ATH_HW_CHECK_POLL_INT 1000 #define ATH_TXFIFO_DEPTH 8 @@ -591,7 +594,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc); int ath_rx_init(struct ath_softc *sc, int nbufs); void ath_rx_cleanup(struct ath_softc 
*sc); -//focus: wiptp int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp); struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); @@ -998,6 +1000,362 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */ +#define ATH9K_PTP_EVTLOG_LEN_DEFAULT 4096 +#define ATH9K_PTP_EVTLOG_HDR_LEN 64 +#define ATH9K_PTP_EVTLOG_PAYLOAD_LEN 128 +#define ATH9K_PTP_STACK_DEPTH 16 +#define ATH9K_PTP_TC_TRACE_LEN_DEFAULT 2048 +#define ATH9K_PTP_HANDOFF_LEN_DEFAULT 256 +#define ATH9K_PTP_RING_MIN 256 +#define ATH9K_PTP_RING_MAX 16384 +#define ATH9K_PTP_MSGTYPE_MAX 16 +#define ATH9K_PTP_MSGTYPE_SYNC 0x0 +#define ATH9K_PTP_MSGTYPE_DELAY_REQ 0x1 +#define ATH9K_PTP_MSGTYPE_FOLLOW_UP 0x8 +#define ATH9K_PTP_MSGTYPE_DELAY_RESP 0x9 +#define ATH9K_PTP_RX_PARSE_BYTES 64 + +enum ath9k_ptp_parse_reason { + ATH9K_PTP_PARSE_OK = 0, + ATH9K_PTP_PARSE_NULL_SKB, + ATH9K_PTP_PARSE_SHORT_HDR, + ATH9K_PTP_PARSE_NONDATA, + ATH9K_PTP_PARSE_SHORT_PAYLOAD, + ATH9K_PTP_PARSE_NO_SNAP, + ATH9K_PTP_PARSE_SNAP_SHORT, + ATH9K_PTP_PARSE_L2_PTP, + ATH9K_PTP_PARSE_IPV4_SHORT, + ATH9K_PTP_PARSE_IPV4_NOT_UDP, + ATH9K_PTP_PARSE_IPV4_NOT_PTP_PORT, + ATH9K_PTP_PARSE_IPV6_SHORT, + ATH9K_PTP_PARSE_IPV6_NOT_UDP, + ATH9K_PTP_PARSE_IPV6_NOT_PTP_PORT, + ATH9K_PTP_PARSE_UNKNOWN_ETHERTYPE, + ATH9K_PTP_PARSE_PTP_SHORT, +}; + +struct ath9k_ptp_parse_info { + u8 reason; + u8 ip_version; + u8 ip_proto; + u8 ihl; + u16 hdrlen; + u16 payload_len; + u16 snap_off; + u16 ethertype; + u16 sport; + u16 dport; + u16 ptp_off; + u16 ptp_len; +}; + +enum ath9k_ptp_evtlog_event { + ATH9K_PTP_EVT_RX = 0, + ATH9K_PTP_EVT_TX = 1, + ATH9K_PTP_EVT_REBASE = 2, + ATH9K_PTP_EVT_ADJTIME = 3, + ATH9K_PTP_EVT_ADJFINE = 4, + ATH9K_PTP_EVT_SETTIME = 5, + ATH9K_PTP_EVT_SETTSF = 6, + ATH9K_PTP_EVT_CC_GLITCH = 7, + ATH9K_PTP_EVT_RESET_TSF = 8, + ATH9K_PTP_EVT_PHC_GET = 9, +}; + +enum ath9k_ptp_tc_trace_event { + 
ATH9K_PTP_TC_TRACE_CYC2TIME = 0, + ATH9K_PTP_TC_TRACE_READ = 1, + ATH9K_PTP_TC_TRACE_SET_CYCLE = 2, +}; + +enum ath9k_ptp_tc_trace_reason { + ATH9K_PTP_TC_TRACE_RSN_NONE = 0, + ATH9K_PTP_TC_TRACE_RSN_RX = 1, + ATH9K_PTP_TC_TRACE_RSN_TX = 2, + ATH9K_PTP_TC_TRACE_RSN_GETTIME = 3, + ATH9K_PTP_TC_TRACE_RSN_ADJTIME_PRE = 4, + ATH9K_PTP_TC_TRACE_RSN_ADJTIME_POST = 5, + ATH9K_PTP_TC_TRACE_RSN_SETTIME_PRE = 6, + ATH9K_PTP_TC_TRACE_RSN_SETTIME_POST = 7, + ATH9K_PTP_TC_TRACE_RSN_ADJFINE = 8, +}; + +enum ath9k_ptp_tc_anomaly_source { + ATH9K_PTP_TC_ANOM_SRC_NONE = 0, + ATH9K_PTP_TC_ANOM_SRC_CC = 1, + ATH9K_PTP_TC_ANOM_SRC_TC = 2, +}; + +enum ath9k_ptp_tc_mutation_source { + ATH9K_PTP_TC_MUT_NONE = 0, + ATH9K_PTP_TC_MUT_INIT = 1, + ATH9K_PTP_TC_MUT_CC_SHIFT = 2, + ATH9K_PTP_TC_MUT_ADJTIME = 3, + ATH9K_PTP_TC_MUT_SETTIME = 4, +}; + +struct ath9k_ptp_tc_trace_entry { + u32 seq; + u8 event; + u8 reason; + u8 backward; + u8 pad[1]; + u64 ts_ns; + u64 cycle_in; + u64 cycle_last; + u64 nsec; + u64 frac; + u64 delta; + u64 ns_offset; + u64 ns_out; + u64 cc_mult; + u64 cc_mask; + u32 cc_shift; + u32 caller_pid; + char caller_comm[TASK_COMM_LEN]; +}; + +struct ath9k_ptp_evtlog_entry { + u32 seq; + u8 event; + u8 reserved[3]; + u64 tsf64; + u32 tstamp; + u32 duration; + u32 duration_mid; + u32 tstamp_last; + u32 tstamp_delta; + s32 tstamp_delta_s; + u8 wrap_valid; + u8 settsf_dur_hist_max; + u8 settsf_dur_hist_cnt; + u8 pad2[1]; + u64 ext_prev; + u64 ext; + u64 hwtstamp_ns; + s64 tsf_offset; + u64 phc_tsf; + u64 adj_seq; + u64 rebases; + u64 wraps; + u64 anchor_phc; + u8 anchor_valid; + u8 pad[7]; + u64 settime_old_ns; + u64 settime_new_ns; + s64 settime_delta; + u64 settsf_old; + u64 settsf_new; + s64 settsf_delta; + u32 tsf_hi1; + u32 tsf_lo; + u32 tsf_hi2; + u64 cc_last; + u64 cc_new; + s64 cc_delta; + u32 caller_pid; + char caller_comm[TASK_COMM_LEN]; + u64 sample_cycle; + u64 sample_ns; + u64 sample_rebase_cnt; + s64 sample_tsf_offset; + u32 sample_epoch; + u8 sample_epoch_valid; + 
u8 pad4[3]; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 cc_mult; + u64 cc_mask; + u32 cc_shift; + u8 ptp_msgtype; + u8 ptp_valid; + u16 ptp_seqid; + u16 hdr_len; + u16 payload_len; + u8 stack_len; + u8 pad3[3]; + u8 hdr[ATH9K_PTP_EVTLOG_HDR_LEN]; + u8 payload[ATH9K_PTP_EVTLOG_PAYLOAD_LEN]; + unsigned long stack_entries[ATH9K_PTP_STACK_DEPTH]; + u64 ptp_ts_ns; + u8 ptp_ts_valid; + u8 pad5[7]; + u64 settsf_read_start_ns; + u64 settsf_read_end_ns; + u64 settsf_write_start_ns; + u64 settsf_write_end_ns; + u32 settsf_read_ns; + u32 settsf_write_ns; + u32 settsf_total_ns; + u32 settsf_epoch; + s64 settsf_epoch_offset_ns; + u8 settsf_epoch_valid; + u8 pad6[7]; +}; + +struct ath9k_ptp_sample { + u64 tsf64; + u32 tstamp; + u32 duration; + u32 read_dur_ns; + u64 ext; + u64 rebase_cnt; + s64 tsf_offset; + s64 tsf_offset_ns_rem; + u32 epoch; + u8 epoch_valid; + u8 pad4[3]; + u64 cycle; + u64 ns; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 adj_seq; +}; + +enum ath9k_ptp_handoff_dir { + ATH9K_PTP_HANDOFF_RX = 0, + ATH9K_PTP_HANDOFF_TX = 1, +}; + +struct ath9k_ptp_handoff_entry { + u32 seq; + u8 dir; + u8 ptp_msgtype; + u8 epoch_valid; + u8 hwts_valid; + u16 ptp_seqid; + u16 frame_control; + u16 seq_ctrl; + u16 wlan_seq; + u16 frag; + u16 qos_control; + u8 retry; + u8 qos_valid; + u8 tid; + u8 skb_priority; + u8 skb_queue; + u8 tx_hw_queue; + u8 txq_qnum; + s8 txq_mac80211_qnum; + u8 tx_qid; + u8 tx_tid; + u8 tx_rateindex; + u8 tx_shortretry; + u8 tx_longretry; + s8 tx_rssi; + u16 tx_status; + u16 tx_flags; + u32 tx_info_flags; + u32 txq_depth; + u32 txq_ampdu_depth; + u32 txq_pending_frames; + u16 txq_aifs; + u16 txq_cwmin; + u16 txq_cwmax; + u32 txq_burst_time; + u32 txq_ready_time; + u8 bcnq_qnum; + u16 bcnq_aifs; + u16 bcnq_cwmin; + u16 bcnq_cwmax; + u32 bcnq_burst_time; + u32 bcnq_ready_time; + u32 rx_rxs_flags; + u32 rx_ampdu_reference; + u16 rx_enc_flags; + u8 rx_rs_status; + u8 rx_rs_flags; + u8 rx_phyerr; + s8 rx_rssi; + s8 rx_signal; + u8 
rx_keyix; + u8 rx_rate; + u8 rx_rate_idx; + u8 rx_antenna; + u8 rx_bw; + u8 rx_nss; + u8 rx_chains; + u8 rx_more; + u8 rx_isaggr; + u8 rx_firstaggr; + u8 rx_moreaggr; + u8 rx_num_delims; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + u64 handoff_ns; + u64 hwtstamp_ns; + u64 tsf64; + u32 tstamp; + u32 duration; + u64 ext; + u64 wraps; + s64 tsf_offset; + u64 rebase_cnt; + u32 epoch; + u32 pad2; + u64 sample_cycle; + u64 sample_ns; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 adj_seq; +}; + +struct ath9k_ptp_handoff_tx_meta { + u32 tx_info_flags; + u16 tx_status; + u16 tx_flags; + u8 tx_hw_queue; + u8 txq_qnum; + s8 txq_mac80211_qnum; + u8 tx_qid; + u8 tx_tid; + u8 tx_rateindex; + u8 tx_shortretry; + u8 tx_longretry; + s8 tx_rssi; + u32 txq_depth; + u32 txq_ampdu_depth; + u32 txq_pending_frames; + u16 txq_aifs; + u16 txq_cwmin; + u16 txq_cwmax; + u32 txq_burst_time; + u32 txq_ready_time; + u8 bcnq_qnum; + u16 bcnq_aifs; + u16 bcnq_cwmin; + u16 bcnq_cwmax; + u32 bcnq_burst_time; + u32 bcnq_ready_time; +}; + +struct ath9k_ptp_handoff_rx_meta { + u32 rx_rxs_flags; + u32 rx_ampdu_reference; + u16 rx_enc_flags; + u8 rx_rs_status; + u8 rx_rs_flags; + u8 rx_phyerr; + s8 rx_rssi; + s8 rx_signal; + u8 rx_keyix; + u8 rx_rate; + u8 rx_rate_idx; + u8 rx_antenna; + u8 rx_bw; + u8 rx_nss; + u8 rx_chains; + u8 rx_more; + u8 rx_isaggr; + u8 rx_firstaggr; + u8 rx_moreaggr; + u8 rx_num_delims; +}; + struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -1036,7 +1394,6 @@ struct ath_softc { u8 gtt_cnt; u32 intrstatus; - // focus: wiptp ktime_t intrtstamp; u16 ps_flags; /* PS_* */ @@ -1045,19 +1402,315 @@ struct ath_softc { short nbcnvifs; unsigned long ps_usecount; - // focus: wiptp spinlock_t systim_lock; struct cyclecounter cc; struct timecounter tc; + atomic64_t tsf64_last; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; u32 cc_mult; - struct hrtimer off_timer; - ktime_t off_interval; - u32 off_counter; - s64 off_last; 
- u64 off_base_time; - u64 ptp_dirtyts; + s64 ptp_last_adjtime; + u64 ptp_last_adjtime_ns; + u64 ptp_last_adjtime_pre_ns; + u64 ptp_last_adjtime_post_ns; + s64 ptp_last_adjtime_expected_ns; + u64 ptp_last_adjtime_cycle; + u64 ptp_last_adjtime_cycle_last; + u64 ptp_rx_drop_cnt; + u64 ptp_epoch_drop_cnt; + u64 ptp_rx_hwts_cnt; + u64 ptp_rx_parse_attempt_cnt; + u64 ptp_rx_parse_miss_cnt; + u8 ptp_rx_parse_last_reason; + u8 ptp_rx_parse_last_msgtype; + u16 ptp_rx_parse_last_seqid; + u16 ptp_rx_parse_last_skb_len; + u16 ptp_rx_parse_last_hdrlen; + u16 ptp_rx_parse_last_payload_len; + u16 ptp_rx_parse_last_snap_off; + u16 ptp_rx_parse_last_ptp_off; + u16 ptp_rx_parse_last_ptp_len; + u16 ptp_rx_parse_last_ethertype; + u16 ptp_rx_parse_last_sport; + u16 ptp_rx_parse_last_dport; + u16 ptp_rx_parse_last_fc; + u16 ptp_rx_parse_last_seq_ctrl; + u8 ptp_rx_parse_last_ip_version; + u8 ptp_rx_parse_last_ip_proto; + u8 ptp_rx_parse_last_ihl; + u8 ptp_rx_parse_last_bytes_len; + u8 ptp_rx_parse_last_bytes[ATH9K_PTP_RX_PARSE_BYTES]; + u64 ptp_rx_ptp_seen; + u64 ptp_rx_hwts_done; + u64 ptp_rx_hwts_miss; + u32 ptp_wrap_glitch_thresh; + u32 ptp_rebase_drop_ns; + u64 ptp_rebase_drop_until_ns; + u64 ptp_rebase_drop_cnt; + u64 ptp_rebase_rx_reject_cnt; + u64 ptp_rebase_tx_reject_cnt; + u64 ptp_rebase_fallback_cnt; + u32 ptp_guard_timeout_ms; + u32 ptp_guard_after_event_ms; + u64 ptp_guard_until_ns; + u16 ptp_guard_seqid; + u8 ptp_guard_inflight; + u64 ptp_guard_block_cnt; + u64 ptp_guard_timeout_cnt; + u64 ptp_guard_block_last_ns; + u64 ptp_last_event_ns; + u64 ptp_last_event_rx_ns; + u64 ptp_last_event_tx_ns; + u64 ptp_settsf_cnt; + u64 ptp_settsf_last_ns; + u64 ptp_settsf_last_delta_ns; + s64 ptp_settsf_last_jump_ns; + u32 ptp_settsf_delta_thresh_us; + u64 ptp_settsf_delta_skip_cnt; + u8 ptp_settsf_dur_filter_enable; + u32 ptp_settsf_dur_pct_over_avg; + u8 ptp_settsf_dur_hist_max; + u64 ptp_settsf_dur_drop_cnt; + u64 ptp_settsf_dur_pass_cnt; + u32 ptp_settsf_dur_last_ns; + u32 
ptp_settsf_dur_last_avg_ns; + u32 ptp_settsf_dur_last_med_ns; + u32 ptp_settsf_dur_last_thresh_ns; +#define ATH9K_PTP_SETTSF_DUR_HIST_LEN 64 + u32 ptp_settsf_dur_hist[ATH9K_PTP_SETTSF_DUR_HIST_LEN]; + u8 ptp_settsf_dur_hist_idx; + u8 ptp_settsf_dur_hist_cnt; + u32 ptp_settsf_write_pct_over_avg; + u8 ptp_settsf_write_hist_max; + u32 ptp_settsf_write_last_ns; + u32 ptp_settsf_write_last_avg_ns; + u32 ptp_settsf_write_last_cap_ns; + u32 ptp_settsf_write_hist[ATH9K_PTP_SETTSF_DUR_HIST_LEN]; + u8 ptp_settsf_write_hist_idx; + u8 ptp_settsf_write_hist_cnt; + seqcount_t ptp_epoch_seq; + u32 ptp_epoch; + s64 ptp_epoch_offset_us; + s64 ptp_epoch_offset_rem; + u64 ptp_epoch_tsf; + u32 ptp_rx_wrap_last; + u64 ptp_rx_wrap_ext; + u8 ptp_rx_wrap_valid; + u32 ptp_rx_wrap_epoch; + u32 ptp_rx_wrap_last_prev; + u32 ptp_rx_wrap_delta; + s32 ptp_rx_wrap_delta_s; + u64 ptp_rx_wrap_prev_ext; + u32 ptp_tx_wrap_last; + u64 ptp_tx_wrap_ext; + u8 ptp_tx_wrap_valid; + u32 ptp_tx_wrap_epoch; + u32 ptp_tx_wrap_last_prev; + u32 ptp_tx_wrap_delta; + s32 ptp_tx_wrap_delta_s; + u64 ptp_tx_wrap_prev_ext; + u64 ptp_wrap_rebase_cnt; + u64 ptp_wrap_rebase_tsf; + u32 ptp_raw_tsf_last; + u64 ptp_raw_tsf_wraps; + u64 ptp_raw_tsf_rebase_seen; + u8 ptp_raw_tsf_valid; + s64 ptp_rebase_old_offset_ns; + s64 ptp_rebase_new_offset_ns; + s64 ptp_rebase_anchor_rem_ns; + long ptp_last_adjfine; + u64 ptp_last_adjfine_ns; + u64 ptp_last_adjfine_pre_mult; + u64 ptp_last_adjfine_post_mult; + u64 ptp_adj_seq; + u64 ptp_last_rx_hwtstamp_ns; + u64 ptp_stall_last_rx_ns; + s64 ptp_stall_last_rx_delta_ns; + u64 ptp_stall_rx_cnt; + u64 ptp_sync_dup_drop_cnt; + u64 ptp_sync_dup_last_ext; + u64 ptp_last_rx_mactime; + u64 ptp_last_rx_tsf64; + u32 ptp_last_rx_tstamp; + u32 ptp_sync_dup_window_us; + u32 ptp_sync_dup_last_delta_us; + u32 ptp_sta_hidden_step_thresh_ns; + u32 ptp_infra_reanchor_guard_ns; + u8 ptp_infra_reanchor_enable; + s64 ptp_last_rx_tsf_offset; + u64 ptp_last_rx_rebase; + u8 ptp_last_rx_msgtype; + u8 
ptp_sync_dup_valid; + u8 ptp_sync_dup_last_src[ETH_ALEN]; + u16 ptp_last_rx_seqid; + u16 ptp_sync_dup_last_seqid; + u64 ptp_sta_hidden_step_cnt; + u64 ptp_sta_hidden_step_last_host_ns; + u64 ptp_sta_hidden_step_last_tsf; + s64 ptp_sta_hidden_step_last_delta_host_ns; + s64 ptp_sta_hidden_step_last_delta_tsf_ns; + s64 ptp_sta_hidden_step_last_corr_ns; + u64 ptp_sta_hidden_step_last_abs_ns; + u64 ptp_sta_hidden_step_last_reanchor_ns; + u64 ptp_sta_hidden_step_backwards_cnt; + s64 ptp_sta_hidden_step_backwards_delta_host_ns; + s64 ptp_sta_hidden_step_backwards_delta_tsf_ns; + u64 ptp_infra_reanchor_reject_cnt; + u64 ptp_infra_reanchor_last_delta_ns; + u64 ptp_last_tx_hwtstamp_ns; + u64 ptp_stall_last_tx_ns; + s64 ptp_stall_last_tx_delta_ns; + u64 ptp_stall_tx_cnt; + u8 ptp_tx_use_duration; + u8 ptp_rx_use_duration; + s64 ptp_last_tx_tsf_offset; + u64 ptp_last_tx_rebase; + u8 ptp_last_tx_msgtype; + u16 ptp_last_tx_seqid; + u8 ptp_stall_enable; + u32 ptp_stall_thresh_ns; + u64 ptp_tx_ptp_seen; + u64 ptp_tx_tstamp_req; + u64 ptp_tx_tstamp_done; + u64 ptp_tx_tstamp_miss; + u64 ptp_tx_suspect_status_cnt; + u64 ptp_tx_suspect_status_last_hwtstamp_ns; + u64 ptp_tx_noack_event_cnt; + u32 ptp_tx_suspect_status_last_tstamp; + u8 ptp_noack_ptp_event_enable; + u8 ptp_tx_suspect_status_last_msgtype; + u8 ptp_tx_suspect_status_last_rateindex; + u8 ptp_tx_suspect_status_last_shortretry; + u8 ptp_tx_suspect_status_last_longretry; + u8 ptp_tx_suspect_status_last_status; + u8 ptp_tx_suspect_status_last_flags; + u8 ptp_tx_noack_last_msgtype; + u16 ptp_tx_suspect_status_last_seqid; + u16 ptp_tx_noack_last_seqid; + u64 ptp_rx_type_seen[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_rx_type_done[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_rx_type_miss[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_tx_type_seen[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_tx_type_req[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_tx_type_done[ATH9K_PTP_MSGTYPE_MAX]; + u64 ptp_tx_type_miss[ATH9K_PTP_MSGTYPE_MAX]; + u32 ptp_hwtstamp_tx_type; + u32 
ptp_hwtstamp_rx_filter; + struct ath9k_ptp_sample ptp_sample_rx, ptp_sample_tx; + struct ath9k_ptp_mon { + struct ath9k_ptp_mon_entry { + u64 last; + u64 max_fwd; + u64 max_back; + atomic64_t back_cnt; + } tsf64, rx_ext, phc_ns; + } ptp_mon; + atomic_t ptp_evtlog_seq; + u8 ptp_evtlog_enable; + u8 ptp_evtlog_ptp_only; + u8 ptp_evtlog_phc; + u8 ptp_settsf_enable; + u32 ptp_evtlog_len; + u32 ptp_evtlog_mask; + struct ath9k_ptp_evtlog_entry *ptp_evtlog; + atomic_t ptp_tc_trace_seq; + u8 ptp_tc_trace_enable; + u32 ptp_tc_trace_len; + u32 ptp_tc_trace_mask; + struct ath9k_ptp_tc_trace_entry *ptp_tc_trace; + atomic_t ptp_handoff_seq; + u32 ptp_handoff_len; + u32 ptp_handoff_mask; + struct ath9k_ptp_handoff_entry *ptp_handoff; + u8 ptp_stack_enable; + u8 ptp_last_stack_event; + u16 ptp_last_stack_len; + s64 ptp_last_stack_arg; + u64 ptp_last_stack_ns; + u32 ptp_last_stack_pid; + char ptp_last_stack_comm[TASK_COMM_LEN]; + unsigned long ptp_last_stack_entries[ATH9K_PTP_STACK_DEPTH]; + u64 ptp_cc_last; + u64 ptp_cc_last_raw; + u32 ptp_cc_last_hi1; + u32 ptp_cc_last_lo; + u32 ptp_cc_last_hi2; + u64 ptp_cc_glitch_cnt; + u64 ptp_cc_glitch_ts_ns; + u64 ptp_cc_glitch_last; + u64 ptp_cc_glitch_new; + s64 ptp_cc_glitch_delta; + u64 ptp_cc_glitch_raw; + s64 ptp_cc_glitch_offset; + u32 ptp_cc_glitch_hi1; + u32 ptp_cc_glitch_lo; + u32 ptp_cc_glitch_hi2; + u32 ptp_cc_glitch_pid; + char ptp_cc_glitch_comm[TASK_COMM_LEN]; + u8 ptp_cc_glitch_valid; + u64 ptp_tc_last_host_ns; + u64 ptp_tc_last_ns; + u64 ptp_tc_last_raw; + u64 ptp_tc_anomaly_thresh_ns; + u64 ptp_tc_anomaly_cnt; + u64 ptp_tc_anomaly_ts_ns; + u64 ptp_tc_anomaly_host_ns; + u64 ptp_tc_anomaly_host_last_ns; + u64 ptp_tc_anomaly_ns; + u64 ptp_tc_anomaly_ns_last; + u64 ptp_tc_anomaly_cycle_now; + u64 ptp_tc_anomaly_cycle_last; + u64 ptp_tc_anomaly_raw_now; + u64 ptp_tc_anomaly_raw_last; + u64 ptp_tc_anomaly_adj_seq; + s64 ptp_tc_anomaly_host_delta_ns; + s64 ptp_tc_anomaly_ns_delta_ns; + s64 ptp_tc_anomaly_cycle_delta; + s64 
ptp_tc_anomaly_raw_delta; + s64 ptp_tc_anomaly_offset_ns; + s64 ptp_tc_anomaly_mismatch_ns; + u64 ptp_tc_anomaly_ns_offset; + u64 ptp_tc_anomaly_cc_mult; + u64 ptp_tc_anomaly_cc_mask; + u32 ptp_tc_anomaly_cc_shift; + u8 ptp_tc_anomaly_source; + u8 ptp_tc_anomaly_reason; + u8 ptp_tc_anomaly_valid; + u64 ptp_hwt_last_ns; + u64 ptp_hwt_last_tc_nsec; + u64 ptp_hwt_anomaly_cnt; + u64 ptp_hwt_anomaly_ts_ns; + u64 ptp_hwt_anomaly_prev_ns; + u64 ptp_hwt_anomaly_ns; + u64 ptp_hwt_anomaly_prev_tc_nsec; + u64 ptp_hwt_anomaly_tc_nsec; + u64 ptp_hwt_anomaly_cycle; + u64 ptp_hwt_anomaly_cycle_adj; + u64 ptp_hwt_anomaly_tc_cycle_last; + u64 ptp_hwt_anomaly_tsf64; + u64 ptp_hwt_anomaly_adj_seq; + u64 ptp_hwt_anomaly_rebase_cnt; + s64 ptp_hwt_anomaly_offset_ns; + s64 ptp_hwt_anomaly_offset_rem_ns; + u32 ptp_hwt_anomaly_epoch; + u8 ptp_hwt_anomaly_reason; + u8 ptp_hwt_anomaly_valid; + u64 ptp_tc_mutation_cnt; + u64 ptp_tc_mutation_ts_ns; + u64 ptp_tc_mutation_target_ns; + s64 ptp_tc_mutation_delta_ns; + u64 ptp_tc_mutation_pre_tc_nsec; + u64 ptp_tc_mutation_post_tc_nsec; + u64 ptp_tc_mutation_pre_cycle_last; + u64 ptp_tc_mutation_post_cycle_last; + u64 ptp_tc_mutation_pre_frac; + u64 ptp_tc_mutation_post_frac; + u64 ptp_tc_mutation_adj_seq; + u64 ptp_tc_mutation_cc_mult; + u32 ptp_tc_mutation_cc_shift; + u32 ptp_tc_mutation_shift_arg; + u8 ptp_tc_mutation_source; + u8 ptp_tc_mutation_valid; struct ath_rx rx; struct ath_tx tx; @@ -1114,6 +1767,139 @@ struct ath_softc { #endif }; +/* Track forward/backward deltas for a monitored value. 
+ */
+static inline void ath9k_ptp_mon_update(struct ath9k_ptp_mon_entry *entry,
+					u64 val)
+{
+	u64 last = READ_ONCE(entry->last);
+
+	/* First sample only seeds the tracker; there is no delta yet. */
+	if (!last) {
+		WRITE_ONCE(entry->last, val);
+		return;
+	}
+
+	if (val >= last) {
+		u64 delta = val - last;
+
+		if (delta > READ_ONCE(entry->max_fwd))
+			WRITE_ONCE(entry->max_fwd, delta);
+	} else {
+		u64 delta = last - val;
+
+		if (delta > READ_ONCE(entry->max_back))
+			WRITE_ONCE(entry->max_back, delta);
+		/* Backward motion of a monotonic value is the anomaly we count. */
+		atomic64_inc(&entry->back_cnt);
+	}
+
+	WRITE_ONCE(entry->last, val);
+}
+
+/*
+ * Record a snapshot of a timecounter anomaly: host/ns/cycle/raw values and
+ * their deltas, plus the live cyclecounter parameters and adjustment
+ * sequence number.  Lockless by design: plain WRITE_ONCE stores into
+ * "last anomaly" fields, presumably consumed by a debugfs reader --
+ * NOTE(review): consumer not visible in this hunk, confirm.
+ */
+static inline void ath9k_ptp_tc_anomaly_record(struct ath_softc *sc,
+					       u8 source, u8 reason,
+					       u64 host_ns, u64 host_last_ns,
+					       u64 ns_now, u64 ns_last,
+					       u64 cycle_now, u64 cycle_last,
+					       u64 raw_now, u64 raw_last,
+					       s64 offset_ns,
+					       s64 mismatch_ns,
+					       u64 ns_offset)
+{
+	WRITE_ONCE(sc->ptp_tc_anomaly_ts_ns, host_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_host_ns, host_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_host_last_ns, host_last_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_host_delta_ns,
+		   (s64)host_ns - (s64)host_last_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_ns, ns_now);
+	WRITE_ONCE(sc->ptp_tc_anomaly_ns_last, ns_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_ns_delta_ns,
+		   (s64)ns_now - (s64)ns_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cycle_now, cycle_now);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cycle_last, cycle_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cycle_delta,
+		   (s64)cycle_now - (s64)cycle_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_raw_now, raw_now);
+	WRITE_ONCE(sc->ptp_tc_anomaly_raw_last, raw_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_raw_delta,
+		   (s64)raw_now - (s64)raw_last);
+	WRITE_ONCE(sc->ptp_tc_anomaly_offset_ns, offset_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_mismatch_ns, mismatch_ns);
+	WRITE_ONCE(sc->ptp_tc_anomaly_ns_offset, ns_offset);
+	WRITE_ONCE(sc->ptp_tc_anomaly_adj_seq, READ_ONCE(sc->ptp_adj_seq));
+	/* Capture cyclecounter params in effect at the time of the anomaly. */
+	WRITE_ONCE(sc->ptp_tc_anomaly_cc_mult, sc->cc.mult);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cc_mask, sc->cc.mask);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cc_shift, sc->cc.shift);
+	WRITE_ONCE(sc->ptp_tc_anomaly_source, source);
+	WRITE_ONCE(sc->ptp_tc_anomaly_reason, reason);
+	/* valid is set last so a reader seeing it also sees the payload
+	 * with high probability; no ordering barrier is used here. */
+	WRITE_ONCE(sc->ptp_tc_anomaly_valid, 1);
+	WRITE_ONCE(sc->ptp_tc_anomaly_cnt,
+		   READ_ONCE(sc->ptp_tc_anomaly_cnt) + 1);
+}
+
+/*
+ * Record a snapshot of a hardware-timestamp anomaly (prev/now nanoseconds,
+ * timecounter state, cycle values, epoch/offset context).  Same lockless
+ * "last anomaly" scheme as ath9k_ptp_tc_anomaly_record().
+ */
+static inline void ath9k_ptp_hwt_anomaly_record(struct ath_softc *sc,
+						u8 reason,
+						u64 prev_ns, u64 ns_now,
+						u64 prev_tc_nsec,
+						u64 tc_nsec,
+						u64 cycle,
+						u64 cycle_adj,
+						u64 tc_cycle_last,
+						u64 tsf64,
+						s64 offset_ns,
+						s64 offset_rem_ns,
+						u32 epoch,
+						u64 rebase_cnt)
+{
+	WRITE_ONCE(sc->ptp_hwt_anomaly_ts_ns, ktime_get_ns());
+	WRITE_ONCE(sc->ptp_hwt_anomaly_prev_ns, prev_ns);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_ns, ns_now);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_prev_tc_nsec, prev_tc_nsec);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_tc_nsec, tc_nsec);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_cycle, cycle);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_cycle_adj, cycle_adj);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_tc_cycle_last, tc_cycle_last);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_tsf64, tsf64);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_offset_ns, offset_ns);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_offset_rem_ns, offset_rem_ns);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_epoch, epoch);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_adj_seq, READ_ONCE(sc->ptp_adj_seq));
+	WRITE_ONCE(sc->ptp_hwt_anomaly_rebase_cnt, rebase_cnt);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_reason, reason);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_valid, 1);
+	WRITE_ONCE(sc->ptp_hwt_anomaly_cnt,
+		   READ_ONCE(sc->ptp_hwt_anomaly_cnt) + 1);
+}
+
+/*
+ * Record a deliberate timecounter mutation (init / cc-shift change /
+ * adjtime / settime, see enum ath9k_ptp_tc_mutation_source) together with
+ * the pre/post timecounter state, so later anomalies can be correlated
+ * with intentional changes.
+ */
+static inline void ath9k_ptp_tc_mutation_record(struct ath_softc *sc,
+						u8 source,
+						u64 target_ns,
+						s64 delta_ns,
+						u32 shift_arg,
+						u64 pre_tc_nsec,
+						u64 pre_cycle_last,
+						u64 pre_frac,
+						u64 post_tc_nsec,
+						u64 post_cycle_last,
+						u64 post_frac)
+{
+	WRITE_ONCE(sc->ptp_tc_mutation_ts_ns, ktime_get_ns());
+	WRITE_ONCE(sc->ptp_tc_mutation_target_ns, target_ns);
+	WRITE_ONCE(sc->ptp_tc_mutation_delta_ns, delta_ns);
+	WRITE_ONCE(sc->ptp_tc_mutation_shift_arg, shift_arg);
+	WRITE_ONCE(sc->ptp_tc_mutation_pre_tc_nsec, pre_tc_nsec);
+	WRITE_ONCE(sc->ptp_tc_mutation_post_tc_nsec, post_tc_nsec);
+	WRITE_ONCE(sc->ptp_tc_mutation_pre_cycle_last, pre_cycle_last);
+	WRITE_ONCE(sc->ptp_tc_mutation_post_cycle_last, post_cycle_last);
+	WRITE_ONCE(sc->ptp_tc_mutation_pre_frac, pre_frac);
+	WRITE_ONCE(sc->ptp_tc_mutation_post_frac, post_frac);
+	WRITE_ONCE(sc->ptp_tc_mutation_adj_seq, READ_ONCE(sc->ptp_adj_seq));
+	WRITE_ONCE(sc->ptp_tc_mutation_cc_mult, sc->cc.mult);
+	WRITE_ONCE(sc->ptp_tc_mutation_cc_shift, sc->cc.shift);
+	WRITE_ONCE(sc->ptp_tc_mutation_source, source);
+	WRITE_ONCE(sc->ptp_tc_mutation_valid, 1);
+	WRITE_ONCE(sc->ptp_tc_mutation_cnt,
+		   READ_ONCE(sc->ptp_tc_mutation_cnt) + 1);
+}
+
 /********/
 /* TX99 */
 /********/
@@ -1187,10 +1973,1274 @@ static inline int ath_ahb_init(void) { return 0; };
 static inline void ath_ahb_exit(void) {};
 #endif
-// focus: wiptp
+/* Initialize PTP state and register the PHC. */
 void ath9k_ptp_init(struct ath_softc *sc);
+/* Unregister the PHC and clear callbacks. */
 void ath9k_ptp_remove(struct ath_softc *sc);
-void ath9k_cyc2hwtstamp(struct ath_softc *sc, struct skb_shared_hwtstamps *hwtstamps, u32 cycle);
+/* Re-anchor PHC/RX/TX state to the current raw TSF after mode changes. */
+void ath9k_ptp_mode_reset(struct ath_softc *sc);
+/* Re-anchor PHC/RX/TX state after infrastructure beacon sync. */
+bool ath9k_ptp_infra_reanchor(struct ath_softc *sc, u64 tsf, u32 read_dur_ns);
+void ath9k_ptp_sta_hidden_step_check(struct ath_softc *sc, u64 tsf,
+				     u64 host_ns);
+/* Rebase wrap extenders and TSF offset after TSF change.
+ */
+void ath9k_ptp_wrap_rebase(struct ath_softc *sc, u64 tsf);
+void ath9k_ptp_guard_delay_req(struct ath_softc *sc, u16 seq_id);
+void ath9k_ptp_guard_delay_resp(struct ath_softc *sc, u16 seq_id);
+void ath9k_ptp_guard_event(struct ath_softc *sc);
+u64 ath9k_ptp_tc_read(struct ath_softc *sc, u8 reason);
+
+/*
+ * Return true while the post-rebase quarantine window is open
+ * (ptp_rebase_drop_until_ns still in the future).  Clears the window
+ * lazily once it has expired.  Safe to call with sc == NULL.
+ */
+static inline bool ath9k_ptp_rebase_quarantine_active(struct ath_softc *sc)
+{
+	u64 until_ns;
+	u64 now_ns;
+
+	if (!sc)
+		return false;
+
+	until_ns = READ_ONCE(sc->ptp_rebase_drop_until_ns);
+	if (!until_ns)
+		return false;
+
+	now_ns = ktime_get_ns();
+	if (now_ns < until_ns)
+		return true;
+
+	/* Window expired: reset so later callers take the fast path. */
+	WRITE_ONCE(sc->ptp_rebase_drop_until_ns, 0);
+	return false;
+}
+
+/*
+ * Decide whether a settsf may proceed right now.  Returns false when
+ * settsf handling is disabled or while a guard window (opened around a
+ * PTP delay_req/event) is still active; counts blocks and guard
+ * timeouts as it goes.  sc == NULL permits the operation.
+ */
+static inline bool ath9k_ptp_guard_allow_settsf(struct ath_softc *sc)
+{
+	u64 now_ns;
+	u64 guard_until;
+
+	if (!sc)
+		return true;
+
+	if (!READ_ONCE(sc->ptp_settsf_enable))
+		return false;
+
+	now_ns = ktime_get_ns();
+	guard_until = READ_ONCE(sc->ptp_guard_until_ns);
+	if (guard_until) {
+		if (now_ns > guard_until) {
+			/* Guard expired; if an exchange was still in
+			 * flight, account it as a timeout. */
+			if (READ_ONCE(sc->ptp_guard_inflight))
+				WRITE_ONCE(sc->ptp_guard_timeout_cnt,
+					   READ_ONCE(sc->ptp_guard_timeout_cnt) + 1);
+			WRITE_ONCE(sc->ptp_guard_inflight, 0);
+			WRITE_ONCE(sc->ptp_guard_until_ns, 0);
+			return true;
+		}
+		WRITE_ONCE(sc->ptp_guard_block_cnt,
+			   READ_ONCE(sc->ptp_guard_block_cnt) + 1);
+		WRITE_ONCE(sc->ptp_guard_block_last_ns, now_ns);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Take a consistent snapshot of the epoch state (epoch id, offset and
+ * remainder) under ptp_epoch_seq, optionally reading the hardware TSF
+ * inside the retry loop so the TSF value matches the sampled epoch.
+ * When read_dur_ns is non-NULL the duration of the TSF register read is
+ * measured as well.  Any output pointer may be NULL.  Note: the TSF
+ * read is re-done on seqcount retry, which is intentional.
+ */
+static inline void ath9k_ptp_epoch_snapshot(struct ath_softc *sc,
+					    u64 *tsf64,
+					    s64 *offset_us,
+					    s64 *offset_rem,
+					    u32 *epoch,
+					    u32 *read_dur_ns)
+{
+	unsigned int seq;
+	u64 tsf_snap = 0;
+	u32 read_dur = 0;
+	s64 off_us = 0;
+	s64 off_rem = 0;
+	u32 ep = 0;
+
+	do {
+		u64 read_start_ns = 0;
+		u64 read_end_ns = 0;
+		u64 dur_ns = 0;
+
+		seq = read_seqcount_begin(&sc->ptp_epoch_seq);
+		ep = sc->ptp_epoch;
+		off_us = sc->ptp_epoch_offset_us;
+		off_rem = sc->ptp_epoch_offset_rem;
+		if (tsf64) {
+			if (read_dur_ns) {
+				read_start_ns = ktime_get_ns();
+				tsf_snap = ath9k_hw_gettsf64(sc->sc_ah);
+				read_end_ns = ktime_get_ns();
+				dur_ns = read_end_ns - read_start_ns;
+				read_dur = dur_ns > U32_MAX ? U32_MAX : (u32)dur_ns;
+			} else {
+				tsf_snap = ath9k_hw_gettsf64(sc->sc_ah);
+			}
+		}
+	} while (read_seqcount_retry(&sc->ptp_epoch_seq, seq));
+
+	if (tsf64)
+		*tsf64 = tsf_snap;
+	if (read_dur_ns)
+		*read_dur_ns = read_dur;
+	if (offset_us)
+		*offset_us = off_us;
+	if (offset_rem)
+		*offset_rem = off_rem;
+	if (epoch)
+		*epoch = ep;
+}
+
+/* Convert a cycle value to hwtstamp without sample data. */
+void ath9k_cyc2hwtstamp(struct ath_softc *sc, struct skb_shared_hwtstamps *hwtstamps, u64 cycle);
+/* Convert a cycle value to hwtstamp and collect sample data. */
+void ath9k_cyc2hwtstamp_sample(struct ath_softc *sc,
+			       struct skb_shared_hwtstamps *hwtstamps,
+			       u64 cycle,
+			       struct ath9k_ptp_sample *sample);
+
+/*
+ * Heuristic check for PTP Ethernet payload in an 802.11 data frame.
+ * Handles LLC/SNAP (scanned, so mesh control is tolerated), one optional
+ * VLAN tag, L2 PTP (0x88F7), and PTP over IPv4/IPv6 UDP ports 319/320
+ * (no IPv6 extension headers).  On success fills *msg_type (PTP header
+ * messageType nibble), *seq_id (bytes 30-31 of the PTP header), the PTP
+ * payload offset/length, and optionally a full ath9k_ptp_parse_info
+ * diagnostic record; on failure info->reason says why.  Assumes a
+ * linear skb -- NOTE(review): confirm for all callers.  Any output
+ * pointer may be NULL.
+ */
+static inline bool ath9k_ptp_parse_skb_diag(const struct sk_buff *skb,
+					    u8 *msg_type, u16 *seq_id,
+					    u16 *ptp_off, u16 *ptp_len_out,
+					    struct ath9k_ptp_parse_info *info)
+{
+	struct ieee80211_hdr *hdr;
+	u16 hdrlen;
+	const u8 *payload;
+	const u8 *snap;
+	const u8 *ptp = NULL;
+	u16 payload_len;
+	u16 snap_off;
+	u16 ethertype;
+	u16 l3_off;
+	u16 max_scan;
+	u16 i;
+	u16 ptp_len = 0;
+	u8 reason = ATH9K_PTP_PARSE_OK;
+
+	/* Pre-set outputs to "invalid" so failure paths are consistent. */
+	if (msg_type)
+		*msg_type = 0xFF;
+	if (seq_id)
+		*seq_id = 0xFFFF;
+	if (ptp_off)
+		*ptp_off = 0;
+	if (ptp_len_out)
+		*ptp_len_out = 0;
+	if (info) {
+		memset(info, 0, sizeof(*info));
+		info->reason = ATH9K_PTP_PARSE_OK;
+	}
+
+	if (!skb) {
+		reason = ATH9K_PTP_PARSE_NULL_SKB;
+		goto fail;
+	}
+
+	if (skb->len < sizeof(*hdr)) {
+		reason = ATH9K_PTP_PARSE_SHORT_HDR;
+		if (info)
+			info->payload_len = skb->len;
+		goto fail;
+	}
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	if (!ieee80211_is_data_present(hdr->frame_control)) {
+		reason = ATH9K_PTP_PARSE_NONDATA;
+		goto fail;
+	}
+
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (info) {
+		info->hdrlen = hdrlen;
+		info->payload_len = (skb->len > hdrlen) ? skb->len - hdrlen : 0;
+	}
+	if (skb->len < hdrlen + 8) {
+		reason = ATH9K_PTP_PARSE_SHORT_PAYLOAD;
+		goto fail;
+	}
+
+	payload = skb->data + hdrlen;
+	payload_len = skb->len - hdrlen;
+	if (info)
+		info->payload_len = payload_len;
+
+	/* Find LLC/SNAP header within first bytes (handles mesh control). */
+	max_scan = min_t(u16, payload_len, 64);
+	snap = NULL;
+	for (i = 0; i + 8 <= max_scan; i++) {
+		if (payload[i] == 0xAA && payload[i + 1] == 0xAA &&
+		    payload[i + 2] == 0x03 &&
+		    payload[i + 3] == 0x00 && payload[i + 4] == 0x00 &&
+		    payload[i + 5] == 0x00) {
+			snap = payload + i;
+			break;
+		}
+	}
+	if (!snap) {
+		reason = ATH9K_PTP_PARSE_NO_SNAP;
+		goto fail;
+	}
+
+	snap_off = snap - payload;
+	if (info)
+		info->snap_off = snap_off;
+	if (payload_len < snap_off + 8) {
+		reason = ATH9K_PTP_PARSE_SNAP_SHORT;
+		goto fail;
+	}
+
+	ethertype = (snap[6] << 8) | snap[7];
+	l3_off = snap_off + 8;
+	if (info)
+		info->ethertype = ethertype;
+
+	/* Optional single VLAN tag. */
+	if (ethertype == 0x8100 && payload_len >= snap_off + 12) {
+		ethertype = (snap[10] << 8) | snap[11];
+		l3_off = snap_off + 12;
+		if (info)
+			info->ethertype = ethertype;
+	}
+
+	/* L2 PTP (0x88F7). */
+	if (ethertype == 0x88F7) {
+		ptp = payload + l3_off;
+		ptp_len = payload_len - l3_off;
+		goto out;
+	}
+
+	/* IPv4/UDP PTP (ports 319/320). */
+	if (ethertype == 0x0800 && payload_len >= l3_off + 20) {
+		const u8 *ip = payload + l3_off;
+		u8 version = ip[0] >> 4;
+		u8 ihl = (ip[0] & 0x0F) * 4;
+		u8 proto = ip[9];
+
+		if (info) {
+			info->ip_version = version;
+			info->ihl = ihl;
+			info->ip_proto = proto;
+		}
+		if (version == 4 && ihl >= 20 &&
+		    payload_len >= l3_off + ihl + 8 && proto == 17) {
+			const u8 *udp = payload + l3_off + ihl;
+			u16 sport = (udp[0] << 8) | udp[1];
+			u16 dport = (udp[2] << 8) | udp[3];
+
+			if (info) {
+				info->sport = sport;
+				info->dport = dport;
+			}
+			if (sport == 319 || sport == 320 ||
+			    dport == 319 || dport == 320) {
+				ptp = udp + 8;
+				ptp_len = payload_len - (l3_off + ihl + 8);
+				goto out;
+			}
+			reason = ATH9K_PTP_PARSE_IPV4_NOT_PTP_PORT;
+			goto fail;
+		}
+
+		reason = proto == 17 ? ATH9K_PTP_PARSE_IPV4_SHORT :
+				       ATH9K_PTP_PARSE_IPV4_NOT_UDP;
+		goto fail;
+	}
+	if (ethertype == 0x0800) {
+		reason = ATH9K_PTP_PARSE_IPV4_SHORT;
+		goto fail;
+	}
+
+	/* IPv6/UDP PTP (ports 319/320), no extension headers handled. */
+	if (ethertype == 0x86DD && payload_len >= l3_off + 40) {
+		const u8 *ip6 = payload + l3_off;
+		u8 nexthdr = ip6[6];
+
+		if (info) {
+			info->ip_version = 6;
+			info->ip_proto = nexthdr;
+		}
+		if (nexthdr == 17 && payload_len >= l3_off + 48) {
+			const u8 *udp = payload + l3_off + 40;
+			u16 sport = (udp[0] << 8) | udp[1];
+			u16 dport = (udp[2] << 8) | udp[3];
+
+			if (info) {
+				info->sport = sport;
+				info->dport = dport;
+			}
+			if (sport == 319 || sport == 320 ||
+			    dport == 319 || dport == 320) {
+				ptp = udp + 8;
+				ptp_len = payload_len - (l3_off + 48);
+				goto out;
+			}
+			reason = ATH9K_PTP_PARSE_IPV6_NOT_PTP_PORT;
+			goto fail;
+		}
+
+		reason = nexthdr == 17 ? ATH9K_PTP_PARSE_IPV6_SHORT :
+					 ATH9K_PTP_PARSE_IPV6_NOT_UDP;
+		goto fail;
+	}
+	if (ethertype == 0x86DD) {
+		reason = ATH9K_PTP_PARSE_IPV6_SHORT;
+		goto fail;
+	}
+
+	reason = ATH9K_PTP_PARSE_UNKNOWN_ETHERTYPE;
+	goto fail;
+
+out:
+	if (!ptp || ptp_len < 1) {
+		reason = ATH9K_PTP_PARSE_PTP_SHORT;
+		goto fail;
+	}
+	if (msg_type)
+		*msg_type = ptp[0] & 0x0F;
+	if (seq_id && ptp_len >= 32)
+		*seq_id = (ptp[30] << 8) | ptp[31];
+	if (ptp_off)
+		*ptp_off = (u16)(ptp - skb->data);
+	if (ptp_len_out)
+		*ptp_len_out = ptp_len;
+	if (info) {
+		info->reason = ATH9K_PTP_PARSE_OK;
+		info->ptp_off = (u16)(ptp - skb->data);
+		info->ptp_len = ptp_len;
+	}
+	return true;
+
+fail:
+	if (info)
+		info->reason = reason;
+	return false;
+}
+
+/* As ath9k_ptp_parse_skb_diag(), without the diagnostic record. */
+static inline bool ath9k_ptp_parse_skb_full(const struct sk_buff *skb,
+					    u8 *msg_type, u16 *seq_id,
+					    u16 *ptp_off, u16 *ptp_len_out)
+{
+	return ath9k_ptp_parse_skb_diag(skb, msg_type, seq_id,
+					ptp_off, ptp_len_out, NULL);
+}
+
+/* Extract only messageType and sequenceId. */
+static inline bool ath9k_ptp_parse_skb(const struct sk_buff *skb,
+				       u8 *msg_type, u16 *seq_id)
+{
+	return ath9k_ptp_parse_skb_full(skb, msg_type, seq_id, NULL, NULL);
+}
+
+/* True if the frame carries a recognizable PTP payload. */
+static inline bool ath9k_ptp_is_ptp_skb(const struct sk_buff *skb)
+{
+	return ath9k_ptp_parse_skb(skb, NULL, NULL);
+}
+
+/* Copy sample fields into an evtlog entry (or clear if missing.
 */
+static inline void ath9k_ptp_evtlog_fill_sample(struct ath9k_ptp_evtlog_entry *entry,
+						const struct ath9k_ptp_sample *sample)
+{
+	if (!sample) {
+		/* No sample captured: zero every sample/timecounter field so
+		 * a reused ring slot cannot expose values left over from a
+		 * previous entry.
+		 */
+		WRITE_ONCE(entry->sample_cycle, 0);
+		WRITE_ONCE(entry->sample_ns, 0);
+		WRITE_ONCE(entry->sample_rebase_cnt, 0);
+		WRITE_ONCE(entry->sample_tsf_offset, 0);
+		WRITE_ONCE(entry->sample_epoch, 0);
+		WRITE_ONCE(entry->sample_epoch_valid, 0);
+		WRITE_ONCE(entry->tc_cycle_last, 0);
+		WRITE_ONCE(entry->tc_nsec, 0);
+		WRITE_ONCE(entry->tc_frac, 0);
+		return;
+	}
+
+	/* WRITE_ONCE keeps each store a single access; the entry as a whole
+	 * is not published atomically. NOTE(review): readers presumably use
+	 * the entry seq field to tolerate torn entries -- confirm against
+	 * the debugfs dump code.
+	 */
+	WRITE_ONCE(entry->sample_cycle, sample->cycle);
+	WRITE_ONCE(entry->sample_ns, sample->ns);
+	WRITE_ONCE(entry->sample_rebase_cnt, sample->rebase_cnt);
+	WRITE_ONCE(entry->sample_tsf_offset, sample->tsf_offset);
+	WRITE_ONCE(entry->sample_epoch, sample->epoch);
+	WRITE_ONCE(entry->sample_epoch_valid, sample->epoch_valid);
+	WRITE_ONCE(entry->tc_cycle_last, sample->tc_cycle_last);
+	WRITE_ONCE(entry->tc_nsec, sample->tc_nsec);
+	WRITE_ONCE(entry->tc_frac, sample->tc_frac);
+}
+
+/* Copy header/payload bytes from skb into an evtlog entry.
*/ +static inline void ath9k_ptp_evtlog_copy_skb(struct ath9k_ptp_evtlog_entry *entry, + const struct sk_buff *skb) +{ + u16 hdr_len = 0; + u16 payload_len = 0; + u16 copy_len; + u8 msg_type = 0xFF; + u16 seq_id = 0xFFFF; + bool is_ptp = false; + u16 ptp_off = 0; + u16 ptp_len = 0; + u64 ptp_ts_ns = 0; + u8 ptp_ts_valid = 0; + + WRITE_ONCE(entry->hdr_len, 0); + WRITE_ONCE(entry->payload_len, 0); + WRITE_ONCE(entry->ptp_msgtype, 0xFF); + WRITE_ONCE(entry->ptp_seqid, 0xFFFF); + WRITE_ONCE(entry->ptp_valid, 0); + WRITE_ONCE(entry->ptp_ts_ns, 0); + WRITE_ONCE(entry->ptp_ts_valid, 0); + + if (!skb) + return; + + is_ptp = ath9k_ptp_parse_skb_full(skb, &msg_type, &seq_id, + &ptp_off, &ptp_len); + if (is_ptp) { + WRITE_ONCE(entry->ptp_msgtype, msg_type); + WRITE_ONCE(entry->ptp_seqid, seq_id); + WRITE_ONCE(entry->ptp_valid, 1); + } + if (is_ptp && (msg_type == 0x8 || msg_type == 0x9) && ptp_len >= 44) { + u8 ts[10]; + + if (!skb_copy_bits(skb, ptp_off + 34, ts, sizeof(ts))) { + u64 sec = ((u64)ts[0] << 40) | + ((u64)ts[1] << 32) | + ((u64)ts[2] << 24) | + ((u64)ts[3] << 16) | + ((u64)ts[4] << 8) | + (u64)ts[5]; + u32 nsec = ((u32)ts[6] << 24) | + ((u32)ts[7] << 16) | + ((u32)ts[8] << 8) | + (u32)ts[9]; + + if (nsec < 1000000000U) { + ptp_ts_ns = sec * 1000000000ULL + nsec; + ptp_ts_valid = 1; + } + } + } + WRITE_ONCE(entry->ptp_ts_ns, ptp_ts_ns); + WRITE_ONCE(entry->ptp_ts_valid, ptp_ts_valid); + + if (skb->len >= sizeof(struct ieee80211_hdr)) { + struct ieee80211_hdr hdr; + + if (!skb_copy_bits(skb, 0, &hdr, sizeof(hdr))) + hdr_len = ieee80211_hdrlen(hdr.frame_control); + } + + if (!hdr_len) + hdr_len = min_t(u16, skb->len, ATH9K_PTP_EVTLOG_HDR_LEN); + if (hdr_len > skb->len) + hdr_len = skb->len; + + WRITE_ONCE(entry->hdr_len, hdr_len); + copy_len = min_t(u16, hdr_len, (u16)ATH9K_PTP_EVTLOG_HDR_LEN); + if (copy_len) + skb_copy_bits(skb, 0, entry->hdr, copy_len); + + if (skb->len > hdr_len) { + payload_len = skb->len - hdr_len; + WRITE_ONCE(entry->payload_len, 
payload_len); + copy_len = min_t(u16, payload_len, + (u16)ATH9K_PTP_EVTLOG_PAYLOAD_LEN); + if (copy_len) + skb_copy_bits(skb, hdr_len, entry->payload, copy_len); + } +} + +/* Copy last captured stack trace into an evtlog entry. */ +static inline void ath9k_ptp_evtlog_copy_stack(struct ath_softc *sc, + struct ath9k_ptp_evtlog_entry *entry) +{ + unsigned long flags; + u16 depth; + + if (!READ_ONCE(sc->ptp_stack_enable)) { + WRITE_ONCE(entry->stack_len, 0); + return; + } + + spin_lock_irqsave(&sc->systim_lock, flags); + depth = sc->ptp_last_stack_len; + if (depth > ATH9K_PTP_STACK_DEPTH) + depth = ATH9K_PTP_STACK_DEPTH; + WRITE_ONCE(entry->stack_len, depth); + if (depth) + memcpy(entry->stack_entries, sc->ptp_last_stack_entries, + depth * sizeof(entry->stack_entries[0])); + spin_unlock_irqrestore(&sc->systim_lock, flags); +} + +/* Add a generic PTP event to the ring buffer. */ +static inline void ath9k_ptp_evtlog_add(struct ath_softc *sc, u8 event, + u64 tsf64, u32 tstamp, u32 duration, + u64 ext, u64 hwtstamp_ns, + const struct ath9k_ptp_sample *sample, + const struct sk_buff *skb) +{ + u32 seq; + s64 tsf_offset; + u32 tstamp_last = 0; + u32 tstamp_delta = 0; + s32 tstamp_delta_s = 0; + u64 ext_prev = 0; + u64 wraps = 0; + u8 wrap_valid = 0; + struct ath9k_ptp_evtlog_entry *entry; + + if (!READ_ONCE(sc->ptp_evtlog_enable) || !sc->ptp_evtlog) + return; + + seq = (u32)atomic_inc_return(&sc->ptp_evtlog_seq); + entry = &sc->ptp_evtlog[seq & sc->ptp_evtlog_mask]; + + if (ext) + wraps = ext >> 32; + tsf_offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + if (event == ATH9K_PTP_EVT_RX) { + wrap_valid = READ_ONCE(sc->ptp_rx_wrap_valid); + tstamp_last = READ_ONCE(sc->ptp_rx_wrap_last_prev); + tstamp_delta = READ_ONCE(sc->ptp_rx_wrap_delta); + tstamp_delta_s = READ_ONCE(sc->ptp_rx_wrap_delta_s); + ext_prev = READ_ONCE(sc->ptp_rx_wrap_prev_ext); + } else if (event == ATH9K_PTP_EVT_TX) { + wrap_valid = READ_ONCE(sc->ptp_tx_wrap_valid); + tstamp_last = 
READ_ONCE(sc->ptp_tx_wrap_last_prev); + tstamp_delta = READ_ONCE(sc->ptp_tx_wrap_delta); + tstamp_delta_s = READ_ONCE(sc->ptp_tx_wrap_delta_s); + ext_prev = READ_ONCE(sc->ptp_tx_wrap_prev_ext); + } + + WRITE_ONCE(entry->event, event); + WRITE_ONCE(entry->tsf64, tsf64); + WRITE_ONCE(entry->tstamp, tstamp); + WRITE_ONCE(entry->duration, duration); + WRITE_ONCE(entry->duration_mid, 0); + WRITE_ONCE(entry->tstamp_last, tstamp_last); + WRITE_ONCE(entry->tstamp_delta, tstamp_delta); + WRITE_ONCE(entry->tstamp_delta_s, tstamp_delta_s); + WRITE_ONCE(entry->wrap_valid, wrap_valid); + WRITE_ONCE(entry->settsf_dur_hist_max, + READ_ONCE(sc->ptp_settsf_dur_hist_max)); + WRITE_ONCE(entry->settsf_dur_hist_cnt, + READ_ONCE(sc->ptp_settsf_dur_hist_cnt)); + WRITE_ONCE(entry->ext_prev, ext_prev); + WRITE_ONCE(entry->ext, ext); + WRITE_ONCE(entry->hwtstamp_ns, hwtstamp_ns); + WRITE_ONCE(entry->tsf_offset, tsf_offset); + WRITE_ONCE(entry->phc_tsf, (u64)((s64)tsf64 + tsf_offset)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sc->ptp_adj_seq)); + WRITE_ONCE(entry->rebases, READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(entry->wraps, wraps); + WRITE_ONCE(entry->anchor_phc, READ_ONCE(sc->sc_ah->ptp_rebase_phc)); + WRITE_ONCE(entry->anchor_valid, + READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid) ? 
1 : 0); + WRITE_ONCE(entry->settime_old_ns, 0); + WRITE_ONCE(entry->settime_new_ns, 0); + WRITE_ONCE(entry->settime_delta, 0); + WRITE_ONCE(entry->settsf_old, 0); + WRITE_ONCE(entry->settsf_new, 0); + WRITE_ONCE(entry->settsf_delta, 0); + WRITE_ONCE(entry->settsf_read_start_ns, 0); + WRITE_ONCE(entry->settsf_read_end_ns, 0); + WRITE_ONCE(entry->settsf_write_start_ns, 0); + WRITE_ONCE(entry->settsf_write_end_ns, 0); + WRITE_ONCE(entry->settsf_read_ns, 0); + WRITE_ONCE(entry->settsf_write_ns, 0); + WRITE_ONCE(entry->settsf_total_ns, 0); + WRITE_ONCE(entry->settsf_epoch, 0); + WRITE_ONCE(entry->settsf_epoch_offset_ns, 0); + WRITE_ONCE(entry->settsf_epoch_valid, 0); + WRITE_ONCE(entry->settsf_read_start_ns, 0); + WRITE_ONCE(entry->settsf_read_end_ns, 0); + WRITE_ONCE(entry->settsf_write_start_ns, 0); + WRITE_ONCE(entry->settsf_write_end_ns, 0); + WRITE_ONCE(entry->settsf_read_ns, 0); + WRITE_ONCE(entry->settsf_write_ns, 0); + WRITE_ONCE(entry->settsf_total_ns, 0); + WRITE_ONCE(entry->settsf_epoch, 0); + WRITE_ONCE(entry->settsf_epoch_offset_ns, 0); + WRITE_ONCE(entry->settsf_epoch_valid, 0); + WRITE_ONCE(entry->tsf_hi1, 0); + WRITE_ONCE(entry->tsf_lo, 0); + WRITE_ONCE(entry->tsf_hi2, 0); + WRITE_ONCE(entry->cc_last, 0); + WRITE_ONCE(entry->cc_new, 0); + WRITE_ONCE(entry->cc_delta, 0); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + WRITE_ONCE(entry->stack_len, 0); + ath9k_ptp_evtlog_fill_sample(entry, sample); + ath9k_ptp_evtlog_copy_skb(entry, skb); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); + + if (event == ATH9K_PTP_EVT_REBASE) { + WRITE_ONCE(entry->settime_delta, + READ_ONCE(sc->ptp_rebase_old_offset_ns)); + WRITE_ONCE(entry->settsf_delta, + READ_ONCE(sc->ptp_rebase_new_offset_ns)); + WRITE_ONCE(entry->duration_mid, duration / 2); + } +} + +/* Log a settime event into the ring buffer. 
*/ +static inline void ath9k_ptp_evtlog_settime(struct ath_softc *sc, u64 tsf64, + u64 old_ns, u64 new_ns) +{ + u32 seq; + s64 tsf_offset; + u64 ext = 0; + u64 wraps = 0; + struct ath9k_ptp_evtlog_entry *entry; + + if (!READ_ONCE(sc->ptp_evtlog_enable) || !sc->ptp_evtlog) + return; + + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + if (ext) + wraps = ext >> 32; + tsf_offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + + seq = (u32)atomic_inc_return(&sc->ptp_evtlog_seq); + entry = &sc->ptp_evtlog[seq & sc->ptp_evtlog_mask]; + + WRITE_ONCE(entry->event, ATH9K_PTP_EVT_SETTIME); + WRITE_ONCE(entry->tsf64, tsf64); + WRITE_ONCE(entry->tstamp, 0); + WRITE_ONCE(entry->duration, 0); + WRITE_ONCE(entry->tstamp_last, 0); + WRITE_ONCE(entry->tstamp_delta, 0); + WRITE_ONCE(entry->tstamp_delta_s, 0); + WRITE_ONCE(entry->wrap_valid, 0); + WRITE_ONCE(entry->ext_prev, 0); + WRITE_ONCE(entry->ext, ext); + WRITE_ONCE(entry->hwtstamp_ns, 0); + WRITE_ONCE(entry->tsf_offset, tsf_offset); + WRITE_ONCE(entry->phc_tsf, (u64)((s64)tsf64 + tsf_offset)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sc->ptp_adj_seq)); + WRITE_ONCE(entry->rebases, READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(entry->wraps, wraps); + WRITE_ONCE(entry->anchor_phc, READ_ONCE(sc->sc_ah->ptp_rebase_phc)); + WRITE_ONCE(entry->anchor_valid, + READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid) ? 
1 : 0); + WRITE_ONCE(entry->settime_old_ns, old_ns); + WRITE_ONCE(entry->settime_new_ns, new_ns); + WRITE_ONCE(entry->settime_delta, (s64)new_ns - (s64)old_ns); + WRITE_ONCE(entry->settsf_old, 0); + WRITE_ONCE(entry->settsf_new, 0); + WRITE_ONCE(entry->settsf_delta, 0); + WRITE_ONCE(entry->settsf_read_start_ns, 0); + WRITE_ONCE(entry->settsf_read_end_ns, 0); + WRITE_ONCE(entry->settsf_write_start_ns, 0); + WRITE_ONCE(entry->settsf_write_end_ns, 0); + WRITE_ONCE(entry->settsf_read_ns, 0); + WRITE_ONCE(entry->settsf_write_ns, 0); + WRITE_ONCE(entry->settsf_total_ns, 0); + WRITE_ONCE(entry->settsf_epoch, 0); + WRITE_ONCE(entry->settsf_epoch_offset_ns, 0); + WRITE_ONCE(entry->settsf_epoch_valid, 0); + WRITE_ONCE(entry->tsf_hi1, 0); + WRITE_ONCE(entry->tsf_lo, 0); + WRITE_ONCE(entry->tsf_hi2, 0); + WRITE_ONCE(entry->cc_last, 0); + WRITE_ONCE(entry->cc_new, 0); + WRITE_ONCE(entry->cc_delta, 0); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + WRITE_ONCE(entry->stack_len, 0); + ath9k_ptp_evtlog_fill_sample(entry, NULL); + ath9k_ptp_evtlog_copy_skb(entry, NULL); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); +} + +/* Log a settsf event into the ring buffer. 
*/ +static inline void ath9k_ptp_evtlog_settsf(struct ath_softc *sc, u64 tsf_old, + u64 tsf_new) +{ + u32 seq; + s64 tsf_offset; + s64 epoch_off_us = 0; + s64 epoch_off_rem = 0; + s64 epoch_off_ns = 0; + u32 epoch = 0; + u64 ext = 0; + u64 wraps = 0; + struct ath9k_ptp_evtlog_entry *entry; + + if (!READ_ONCE(sc->ptp_evtlog_enable) || !sc->ptp_evtlog) + return; + + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + if (ext) + wraps = ext >> 32; + tsf_offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + + seq = (u32)atomic_inc_return(&sc->ptp_evtlog_seq); + entry = &sc->ptp_evtlog[seq & sc->ptp_evtlog_mask]; + + WRITE_ONCE(entry->event, ATH9K_PTP_EVT_SETTSF); + WRITE_ONCE(entry->tsf64, tsf_new); + WRITE_ONCE(entry->tstamp, 0); + WRITE_ONCE(entry->duration, 0); + WRITE_ONCE(entry->tstamp_last, 0); + WRITE_ONCE(entry->tstamp_delta, 0); + WRITE_ONCE(entry->tstamp_delta_s, 0); + WRITE_ONCE(entry->wrap_valid, 0); + WRITE_ONCE(entry->ext_prev, 0); + WRITE_ONCE(entry->ext, ext); + WRITE_ONCE(entry->hwtstamp_ns, 0); + WRITE_ONCE(entry->tsf_offset, tsf_offset); + WRITE_ONCE(entry->phc_tsf, (u64)((s64)tsf_new + tsf_offset)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sc->ptp_adj_seq)); + WRITE_ONCE(entry->rebases, READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(entry->wraps, wraps); + WRITE_ONCE(entry->anchor_phc, READ_ONCE(sc->sc_ah->ptp_rebase_phc)); + WRITE_ONCE(entry->anchor_valid, + READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid) ? 
1 : 0); + WRITE_ONCE(entry->settime_old_ns, 0); + WRITE_ONCE(entry->settime_new_ns, 0); + WRITE_ONCE(entry->settime_delta, 0); + WRITE_ONCE(entry->settsf_old, tsf_old); + WRITE_ONCE(entry->settsf_new, tsf_new); + WRITE_ONCE(entry->settsf_delta, (s64)tsf_new - (s64)tsf_old); + WRITE_ONCE(entry->settsf_read_start_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_read_start_ns)); + WRITE_ONCE(entry->settsf_read_end_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_read_end_ns)); + WRITE_ONCE(entry->settsf_write_start_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_write_start_ns)); + WRITE_ONCE(entry->settsf_write_end_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_write_end_ns)); + WRITE_ONCE(entry->settsf_read_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_read_ns)); + WRITE_ONCE(entry->settsf_write_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_write_ns)); + WRITE_ONCE(entry->settsf_total_ns, + READ_ONCE(sc->sc_ah->ptp_settsf_total_ns)); + ath9k_ptp_epoch_snapshot(sc, NULL, &epoch_off_us, &epoch_off_rem, + &epoch, NULL); + epoch_off_ns = epoch_off_us * (s64)NSEC_PER_USEC + epoch_off_rem; + WRITE_ONCE(entry->settsf_epoch, epoch); + WRITE_ONCE(entry->settsf_epoch_offset_ns, epoch_off_ns); + WRITE_ONCE(entry->settsf_epoch_valid, 1); + WRITE_ONCE(entry->tsf_hi1, 0); + WRITE_ONCE(entry->tsf_lo, 0); + WRITE_ONCE(entry->tsf_hi2, 0); + WRITE_ONCE(entry->cc_last, 0); + WRITE_ONCE(entry->cc_new, 0); + WRITE_ONCE(entry->cc_delta, 0); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + ath9k_ptp_evtlog_fill_sample(entry, NULL); + ath9k_ptp_evtlog_copy_skb(entry, NULL); + ath9k_ptp_evtlog_copy_stack(sc, entry); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); +} + +/* Log a reset TSF event into the ring buffer. 
*/ +static inline void ath9k_ptp_evtlog_resettsf(struct ath_softc *sc, u64 tsf_old, + u64 tsf_new) +{ + u32 seq; + s64 tsf_offset; + u64 ext = 0; + u64 wraps = 0; + struct ath9k_ptp_evtlog_entry *entry; + + if (!READ_ONCE(sc->ptp_evtlog_enable) || !sc->ptp_evtlog) + return; + + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + if (ext) + wraps = ext >> 32; + tsf_offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + + seq = (u32)atomic_inc_return(&sc->ptp_evtlog_seq); + entry = &sc->ptp_evtlog[seq & sc->ptp_evtlog_mask]; + + WRITE_ONCE(entry->event, ATH9K_PTP_EVT_RESET_TSF); + WRITE_ONCE(entry->tsf64, tsf_new); + WRITE_ONCE(entry->tstamp, 0); + WRITE_ONCE(entry->duration, 0); + WRITE_ONCE(entry->tstamp_last, 0); + WRITE_ONCE(entry->tstamp_delta, 0); + WRITE_ONCE(entry->tstamp_delta_s, 0); + WRITE_ONCE(entry->wrap_valid, 0); + WRITE_ONCE(entry->ext_prev, 0); + WRITE_ONCE(entry->ext, ext); + WRITE_ONCE(entry->hwtstamp_ns, 0); + WRITE_ONCE(entry->tsf_offset, tsf_offset); + WRITE_ONCE(entry->phc_tsf, (u64)((s64)tsf_new + tsf_offset)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sc->ptp_adj_seq)); + WRITE_ONCE(entry->rebases, READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(entry->wraps, wraps); + WRITE_ONCE(entry->anchor_phc, READ_ONCE(sc->sc_ah->ptp_rebase_phc)); + WRITE_ONCE(entry->anchor_valid, + READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid) ? 
1 : 0); + WRITE_ONCE(entry->settime_old_ns, 0); + WRITE_ONCE(entry->settime_new_ns, 0); + WRITE_ONCE(entry->settime_delta, 0); + WRITE_ONCE(entry->settsf_old, tsf_old); + WRITE_ONCE(entry->settsf_new, tsf_new); + WRITE_ONCE(entry->settsf_delta, (s64)tsf_new - (s64)tsf_old); + WRITE_ONCE(entry->settsf_read_start_ns, 0); + WRITE_ONCE(entry->settsf_read_end_ns, 0); + WRITE_ONCE(entry->settsf_write_start_ns, 0); + WRITE_ONCE(entry->settsf_write_end_ns, 0); + WRITE_ONCE(entry->settsf_read_ns, 0); + WRITE_ONCE(entry->settsf_write_ns, 0); + WRITE_ONCE(entry->settsf_total_ns, 0); + WRITE_ONCE(entry->settsf_epoch, 0); + WRITE_ONCE(entry->settsf_epoch_offset_ns, 0); + WRITE_ONCE(entry->settsf_epoch_valid, 0); + WRITE_ONCE(entry->tsf_hi1, 0); + WRITE_ONCE(entry->tsf_lo, 0); + WRITE_ONCE(entry->tsf_hi2, 0); + WRITE_ONCE(entry->cc_last, 0); + WRITE_ONCE(entry->cc_new, 0); + WRITE_ONCE(entry->cc_delta, 0); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + ath9k_ptp_evtlog_fill_sample(entry, NULL); + ath9k_ptp_evtlog_copy_skb(entry, NULL); + ath9k_ptp_evtlog_copy_stack(sc, entry); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); +} + +/* Log a cyclecounter glitch event into the ring buffer. 
*/ +static inline void ath9k_ptp_evtlog_cc_glitch(struct ath_softc *sc, u64 tsf64, + u32 tsf_hi1, u32 tsf_lo, + u32 tsf_hi2, s64 tsf_offset, + u64 cc_last, u64 cc_new, + s64 cc_delta) +{ + u32 seq; + u64 ext = 0; + u64 wraps = 0; + struct ath9k_ptp_evtlog_entry *entry; + + if (!READ_ONCE(sc->ptp_evtlog_enable) || !sc->ptp_evtlog) + return; + + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + if (ext) + wraps = ext >> 32; + + seq = (u32)atomic_inc_return(&sc->ptp_evtlog_seq); + entry = &sc->ptp_evtlog[seq & sc->ptp_evtlog_mask]; + + WRITE_ONCE(entry->event, ATH9K_PTP_EVT_CC_GLITCH); + WRITE_ONCE(entry->tsf64, tsf64); + WRITE_ONCE(entry->tstamp, 0); + WRITE_ONCE(entry->duration, 0); + WRITE_ONCE(entry->tstamp_last, 0); + WRITE_ONCE(entry->tstamp_delta, 0); + WRITE_ONCE(entry->tstamp_delta_s, 0); + WRITE_ONCE(entry->wrap_valid, 0); + WRITE_ONCE(entry->ext_prev, 0); + WRITE_ONCE(entry->ext, ext); + WRITE_ONCE(entry->hwtstamp_ns, cc_new); + WRITE_ONCE(entry->tsf_offset, tsf_offset); + WRITE_ONCE(entry->phc_tsf, (u64)((s64)tsf64 + tsf_offset)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sc->ptp_adj_seq)); + WRITE_ONCE(entry->rebases, READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(entry->wraps, wraps); + WRITE_ONCE(entry->anchor_phc, READ_ONCE(sc->sc_ah->ptp_rebase_phc)); + WRITE_ONCE(entry->anchor_valid, + READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid) ? 
1 : 0); + WRITE_ONCE(entry->settime_old_ns, 0); + WRITE_ONCE(entry->settime_new_ns, 0); + WRITE_ONCE(entry->settime_delta, 0); + WRITE_ONCE(entry->settsf_old, 0); + WRITE_ONCE(entry->settsf_new, 0); + WRITE_ONCE(entry->settsf_delta, 0); + WRITE_ONCE(entry->tsf_hi1, tsf_hi1); + WRITE_ONCE(entry->tsf_lo, tsf_lo); + WRITE_ONCE(entry->tsf_hi2, tsf_hi2); + WRITE_ONCE(entry->cc_last, cc_last); + WRITE_ONCE(entry->cc_new, cc_new); + WRITE_ONCE(entry->cc_delta, cc_delta); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + WRITE_ONCE(entry->stack_len, 0); + ath9k_ptp_evtlog_fill_sample(entry, NULL); + ath9k_ptp_evtlog_copy_skb(entry, NULL); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); +} + +/* + * Convert cycles to nanoseconds without overflowing the intermediate + * cycles * mult product. For large stale timecounter snapshots this keeps + * the conversion exact by processing the delta in safe chunks. + */ +static inline u64 ath9k_cc_cyc2ns_chunked(const struct cyclecounter *cc, + u64 cycles, u64 mask, u64 *frac) +{ + u64 total = 0; + u64 step_limit; + + if (!cycles || !cc->mult) + return 0; + + step_limit = div64_u64(U64_MAX - mask, cc->mult); + if (!step_limit) + step_limit = 1; + + while (cycles) { + u64 step = min_t(u64, cycles, step_limit); + + total += cyclecounter_cyc2ns(cc, step, mask, frac); + cycles -= step; + } + + return total; +} + +static inline u64 ath9k_cc_cyc2ns_backwards_chunked(const struct cyclecounter *cc, + u64 cycles, u64 mask, + u64 frac) +{ + u64 frac_local; + u64 ns; + + if (!cycles) + return 0; + + if (!frac) { + frac_local = 0; + return ath9k_cc_cyc2ns_chunked(cc, cycles, mask, &frac_local); + } + + frac_local = ((mask + 1) - frac) & mask; + ns = ath9k_cc_cyc2ns_chunked(cc, cycles, mask, &frac_local); + + return ns ? ns - 1 : 0; +} + +/* Record a timecounter trace entry. 
*/ +static inline void ath9k_ptp_tc_trace_add(struct ath_softc *sc, u8 event, + u8 reason, u8 backward, + u64 cycle_in, u64 cycle_last, + u64 nsec, u64 frac, u64 delta, + u64 ns_offset, u64 ns_out) +{ + u32 seq; + struct ath9k_ptp_tc_trace_entry *entry; + + if (!READ_ONCE(sc->ptp_tc_trace_enable) || !sc->ptp_tc_trace) + return; + + seq = (u32)atomic_inc_return(&sc->ptp_tc_trace_seq); + entry = &sc->ptp_tc_trace[seq & sc->ptp_tc_trace_mask]; + + WRITE_ONCE(entry->event, event); + WRITE_ONCE(entry->reason, reason); + WRITE_ONCE(entry->backward, backward); + WRITE_ONCE(entry->ts_ns, ktime_get_ns()); + WRITE_ONCE(entry->cycle_in, cycle_in); + WRITE_ONCE(entry->cycle_last, cycle_last); + WRITE_ONCE(entry->nsec, nsec); + WRITE_ONCE(entry->frac, frac); + WRITE_ONCE(entry->delta, delta); + WRITE_ONCE(entry->ns_offset, ns_offset); + WRITE_ONCE(entry->ns_out, ns_out); + WRITE_ONCE(entry->cc_mult, sc->cc.mult); + WRITE_ONCE(entry->cc_mask, sc->cc.mask); + WRITE_ONCE(entry->cc_shift, sc->cc.shift); + WRITE_ONCE(entry->caller_pid, 0); + entry->caller_comm[0] = '\0'; + WRITE_ONCE(entry->seq, seq); +} + +/* Trace a timecounter cyc2time conversion. */ +static inline void ath9k_ptp_tc_trace_cyc2time(struct ath_softc *sc, + u64 cycle_in, u64 cycle_last, + u64 nsec, u64 frac, u64 ns_out, + u8 reason) +{ + u64 delta = (cycle_in - cycle_last) & sc->cc.mask; + u64 ns_offset; + u64 frac_local = frac; + bool backward; + u64 delta_use; + + backward = delta > (sc->cc.mask >> 1); + if (backward) { + delta_use = (cycle_last - cycle_in) & sc->cc.mask; + ns_offset = ath9k_cc_cyc2ns_backwards_chunked(&sc->cc, + delta_use, + sc->tc.mask, + frac_local); + } else { + delta_use = delta; + ns_offset = ath9k_cc_cyc2ns_chunked(&sc->cc, delta_use, + sc->tc.mask, &frac_local); + } + + ath9k_ptp_tc_trace_add(sc, ATH9K_PTP_TC_TRACE_CYC2TIME, + reason, backward ? 
1 : 0, + cycle_in, cycle_last, nsec, frac, + delta, ns_offset, ns_out); +} + +static inline void ath9k_ptp_handoff_add(struct ath_softc *sc, u8 dir, + u8 ptp_msgtype, u16 ptp_seqid, + u64 handoff_ns, u64 hwtstamp_ns, + const struct ath9k_ptp_sample *sample, + const struct sk_buff *skb, + const struct ath9k_ptp_handoff_rx_meta *rx_meta, + const struct ath9k_ptp_handoff_tx_meta *tx_meta) +{ + u32 seq; + struct ath9k_ptp_handoff_entry *entry; + u16 frame_control = 0; + u16 seq_ctrl = 0; + u16 wlan_seq = 0; + u16 frag = 0; + u16 qos_control = 0; + u8 retry = 0; + u8 qos_valid = 0; + u8 tid = 0xff; + u8 skb_priority = 0; + u8 skb_queue = 0; + u8 tx_hw_queue = 0; + u8 txq_qnum = 0; + s8 txq_mac80211_qnum = -1; + u8 tx_qid = 0; + u8 tx_tid = 0xff; + u8 tx_rateindex = 0; + u8 tx_shortretry = 0; + u8 tx_longretry = 0; + s8 tx_rssi = 0; + u32 txq_depth = 0; + u32 txq_ampdu_depth = 0; + u32 txq_pending_frames = 0; + u16 txq_aifs = 0; + u16 txq_cwmin = 0; + u16 txq_cwmax = 0; + u32 txq_burst_time = 0; + u32 txq_ready_time = 0; + u8 bcnq_qnum = 0; + u16 bcnq_aifs = 0; + u16 bcnq_cwmin = 0; + u16 bcnq_cwmax = 0; + u32 bcnq_burst_time = 0; + u32 bcnq_ready_time = 0; + u16 tx_status = 0; + u16 tx_flags = 0; + u32 tx_info_flags = 0; + u32 rx_rxs_flags = 0; + u32 rx_ampdu_reference = 0; + u16 rx_enc_flags = 0; + u8 rx_rs_status = 0; + u8 rx_rs_flags = 0; + u8 rx_phyerr = 0; + s8 rx_rssi = 0; + s8 rx_signal = 0; + u8 rx_keyix = 0; + u8 rx_rate = 0; + u8 rx_rate_idx = 0; + u8 rx_antenna = 0; + u8 rx_bw = 0; + u8 rx_nss = 0; + u8 rx_chains = 0; + u8 rx_more = 0; + u8 rx_isaggr = 0; + u8 rx_firstaggr = 0; + u8 rx_moreaggr = 0; + u8 rx_num_delims = 0; + u8 addr1[ETH_ALEN] = { 0 }; + u8 addr2[ETH_ALEN] = { 0 }; + u8 addr3[ETH_ALEN] = { 0 }; + + if (!sc->ptp_handoff || !READ_ONCE(sc->ptp_evtlog_enable)) + return; + + if (skb && skb->len >= sizeof(struct ieee80211_hdr_3addr)) { + const struct ieee80211_hdr *hdr; + + hdr = (const struct ieee80211_hdr *)skb->data; + frame_control = 
le16_to_cpu(hdr->frame_control); + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + wlan_seq = seq_ctrl >> IEEE80211_SEQ_SEQ_SHIFT; + frag = seq_ctrl & IEEE80211_SCTL_FRAG; + retry = !!(frame_control & IEEE80211_FCTL_RETRY); + skb_priority = skb->priority; + skb_queue = skb_get_queue_mapping(skb); + ether_addr_copy(addr1, hdr->addr1); + ether_addr_copy(addr2, hdr->addr2); + ether_addr_copy(addr3, hdr->addr3); + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u16 hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (skb->len >= hdrlen) { + u8 *qc = ieee80211_get_qos_ctl((void *)hdr); + + qos_control = qc[0] | (qc[1] << 8); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + qos_valid = 1; + } + } + } + + if (rx_meta) { + rx_rxs_flags = rx_meta->rx_rxs_flags; + rx_ampdu_reference = rx_meta->rx_ampdu_reference; + rx_enc_flags = rx_meta->rx_enc_flags; + rx_rs_status = rx_meta->rx_rs_status; + rx_rs_flags = rx_meta->rx_rs_flags; + rx_phyerr = rx_meta->rx_phyerr; + rx_rssi = rx_meta->rx_rssi; + rx_signal = rx_meta->rx_signal; + rx_keyix = rx_meta->rx_keyix; + rx_rate = rx_meta->rx_rate; + rx_rate_idx = rx_meta->rx_rate_idx; + rx_antenna = rx_meta->rx_antenna; + rx_bw = rx_meta->rx_bw; + rx_nss = rx_meta->rx_nss; + rx_chains = rx_meta->rx_chains; + rx_more = rx_meta->rx_more; + rx_isaggr = rx_meta->rx_isaggr; + rx_firstaggr = rx_meta->rx_firstaggr; + rx_moreaggr = rx_meta->rx_moreaggr; + rx_num_delims = rx_meta->rx_num_delims; + } + + if (tx_meta) { + tx_info_flags = tx_meta->tx_info_flags; + tx_status = tx_meta->tx_status; + tx_flags = tx_meta->tx_flags; + tx_hw_queue = tx_meta->tx_hw_queue; + txq_qnum = tx_meta->txq_qnum; + txq_mac80211_qnum = tx_meta->txq_mac80211_qnum; + tx_qid = tx_meta->tx_qid; + tx_tid = tx_meta->tx_tid; + tx_rateindex = tx_meta->tx_rateindex; + tx_shortretry = tx_meta->tx_shortretry; + tx_longretry = tx_meta->tx_longretry; + tx_rssi = tx_meta->tx_rssi; + txq_depth = tx_meta->txq_depth; + txq_ampdu_depth = tx_meta->txq_ampdu_depth; + txq_pending_frames = 
tx_meta->txq_pending_frames; + txq_aifs = tx_meta->txq_aifs; + txq_cwmin = tx_meta->txq_cwmin; + txq_cwmax = tx_meta->txq_cwmax; + txq_burst_time = tx_meta->txq_burst_time; + txq_ready_time = tx_meta->txq_ready_time; + bcnq_qnum = tx_meta->bcnq_qnum; + bcnq_aifs = tx_meta->bcnq_aifs; + bcnq_cwmin = tx_meta->bcnq_cwmin; + bcnq_cwmax = tx_meta->bcnq_cwmax; + bcnq_burst_time = tx_meta->bcnq_burst_time; + bcnq_ready_time = tx_meta->bcnq_ready_time; + } + + seq = (u32)atomic_inc_return(&sc->ptp_handoff_seq); + entry = &sc->ptp_handoff[seq & sc->ptp_handoff_mask]; + + WRITE_ONCE(entry->dir, dir); + WRITE_ONCE(entry->ptp_msgtype, ptp_msgtype); + WRITE_ONCE(entry->epoch_valid, + READ_ONCE(sample->epoch_valid) ? 1 : 0); + WRITE_ONCE(entry->hwts_valid, hwtstamp_ns ? 1 : 0); + WRITE_ONCE(entry->ptp_seqid, ptp_seqid); + WRITE_ONCE(entry->frame_control, frame_control); + WRITE_ONCE(entry->seq_ctrl, seq_ctrl); + WRITE_ONCE(entry->wlan_seq, wlan_seq); + WRITE_ONCE(entry->frag, frag); + WRITE_ONCE(entry->qos_control, qos_control); + WRITE_ONCE(entry->retry, retry); + WRITE_ONCE(entry->qos_valid, qos_valid); + WRITE_ONCE(entry->tid, tid); + WRITE_ONCE(entry->skb_priority, skb_priority); + WRITE_ONCE(entry->skb_queue, skb_queue); + WRITE_ONCE(entry->tx_hw_queue, tx_hw_queue); + WRITE_ONCE(entry->txq_qnum, txq_qnum); + WRITE_ONCE(entry->txq_mac80211_qnum, txq_mac80211_qnum); + WRITE_ONCE(entry->tx_qid, tx_qid); + WRITE_ONCE(entry->tx_tid, tx_tid); + WRITE_ONCE(entry->tx_rateindex, tx_rateindex); + WRITE_ONCE(entry->tx_shortretry, tx_shortretry); + WRITE_ONCE(entry->tx_longretry, tx_longretry); + WRITE_ONCE(entry->tx_rssi, tx_rssi); + WRITE_ONCE(entry->txq_depth, txq_depth); + WRITE_ONCE(entry->txq_ampdu_depth, txq_ampdu_depth); + WRITE_ONCE(entry->txq_pending_frames, txq_pending_frames); + WRITE_ONCE(entry->txq_aifs, txq_aifs); + WRITE_ONCE(entry->txq_cwmin, txq_cwmin); + WRITE_ONCE(entry->txq_cwmax, txq_cwmax); + WRITE_ONCE(entry->txq_burst_time, txq_burst_time); + 
WRITE_ONCE(entry->txq_ready_time, txq_ready_time); + WRITE_ONCE(entry->bcnq_qnum, bcnq_qnum); + WRITE_ONCE(entry->bcnq_aifs, bcnq_aifs); + WRITE_ONCE(entry->bcnq_cwmin, bcnq_cwmin); + WRITE_ONCE(entry->bcnq_cwmax, bcnq_cwmax); + WRITE_ONCE(entry->bcnq_burst_time, bcnq_burst_time); + WRITE_ONCE(entry->bcnq_ready_time, bcnq_ready_time); + WRITE_ONCE(entry->tx_status, tx_status); + WRITE_ONCE(entry->tx_flags, tx_flags); + WRITE_ONCE(entry->tx_info_flags, tx_info_flags); + WRITE_ONCE(entry->rx_rxs_flags, rx_rxs_flags); + WRITE_ONCE(entry->rx_ampdu_reference, rx_ampdu_reference); + WRITE_ONCE(entry->rx_enc_flags, rx_enc_flags); + WRITE_ONCE(entry->rx_rs_status, rx_rs_status); + WRITE_ONCE(entry->rx_rs_flags, rx_rs_flags); + WRITE_ONCE(entry->rx_phyerr, rx_phyerr); + WRITE_ONCE(entry->rx_rssi, rx_rssi); + WRITE_ONCE(entry->rx_signal, rx_signal); + WRITE_ONCE(entry->rx_keyix, rx_keyix); + WRITE_ONCE(entry->rx_rate, rx_rate); + WRITE_ONCE(entry->rx_rate_idx, rx_rate_idx); + WRITE_ONCE(entry->rx_antenna, rx_antenna); + WRITE_ONCE(entry->rx_bw, rx_bw); + WRITE_ONCE(entry->rx_nss, rx_nss); + WRITE_ONCE(entry->rx_chains, rx_chains); + WRITE_ONCE(entry->rx_more, rx_more); + WRITE_ONCE(entry->rx_isaggr, rx_isaggr); + WRITE_ONCE(entry->rx_firstaggr, rx_firstaggr); + WRITE_ONCE(entry->rx_moreaggr, rx_moreaggr); + WRITE_ONCE(entry->rx_num_delims, rx_num_delims); + ether_addr_copy(entry->addr1, addr1); + ether_addr_copy(entry->addr2, addr2); + ether_addr_copy(entry->addr3, addr3); + WRITE_ONCE(entry->handoff_ns, handoff_ns); + WRITE_ONCE(entry->hwtstamp_ns, hwtstamp_ns); + WRITE_ONCE(entry->tsf64, READ_ONCE(sample->tsf64)); + WRITE_ONCE(entry->tstamp, READ_ONCE(sample->tstamp)); + WRITE_ONCE(entry->duration, READ_ONCE(sample->duration)); + WRITE_ONCE(entry->ext, READ_ONCE(sample->ext)); + WRITE_ONCE(entry->wraps, READ_ONCE(sample->ext) >> 32); + WRITE_ONCE(entry->tsf_offset, READ_ONCE(sample->tsf_offset)); + WRITE_ONCE(entry->rebase_cnt, READ_ONCE(sample->rebase_cnt)); + 
WRITE_ONCE(entry->epoch, READ_ONCE(sample->epoch)); + WRITE_ONCE(entry->sample_cycle, READ_ONCE(sample->cycle)); + WRITE_ONCE(entry->sample_ns, READ_ONCE(sample->ns)); + WRITE_ONCE(entry->tc_cycle_last, READ_ONCE(sample->tc_cycle_last)); + WRITE_ONCE(entry->tc_nsec, READ_ONCE(sample->tc_nsec)); + WRITE_ONCE(entry->tc_frac, READ_ONCE(sample->tc_frac)); + WRITE_ONCE(entry->adj_seq, READ_ONCE(sample->adj_seq)); + WRITE_ONCE(entry->seq, seq); +} #define ATH9K_PTP_FAKE_SHIFT 21 #endif /* ATH9K_H */ diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 7c0a5065cc4e..b7f1b51ce754 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -247,7 +247,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc) struct ieee80211_vif *vif; struct ath_vif *avp; s64 tsfadjust; - u32 offset; + u64 offset; int first_slot = ATH_BCBUF; int slot; @@ -290,10 +290,23 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc) "Adjusting global TSF after beacon slot reassignment: %lld\n", (signed long long)tsfadjust); + if (!ath9k_ptp_guard_allow_settsf(sc)) + goto out; + + { + u64 now_ns = ktime_get_ns(); + u64 last_evt = READ_ONCE(sc->ptp_last_event_ns); + + WRITE_ONCE(sc->ptp_settsf_last_ns, now_ns); + if (last_evt) + WRITE_ONCE(sc->ptp_settsf_last_delta_ns, + now_ns - last_evt); + } + /* Modify TSF as required and update the HW. 
*/ avp->chanctx->tsf_val += tsfadjust; if (sc->cur_chan == avp->chanctx) { - offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL); + offset = ath9k_hw_get_tsf_offset64(&avp->chanctx->tsf_ts, NULL); ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset); } @@ -314,8 +327,6 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc) if (sc->sc_ah->opmode != NL80211_IFTYPE_AP && sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) { - // focus: wiptp - // ath_dbg(common, BEACON, "slot 0, tsf: %llu\n", ath9k_hw_gettsf64(sc->sc_ah)); return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 4dbdfcc12583..3302b08496a7 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -17,6 +17,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include "ath9k.h" @@ -121,58 +126,2386 @@ static const struct file_operations fops_debug = { #endif -// focus: wiptp start -static ssize_t read_file_dirtyts(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; - u8 buf[sizeof(u64)]; +/* Reset a PTP monitor entry to its initial state. */ +static void ath9k_ptp_mon_reset_entry(struct ath9k_ptp_mon_entry *entry) +{ + WRITE_ONCE(entry->last, 0); + WRITE_ONCE(entry->max_fwd, 0); + WRITE_ONCE(entry->max_back, 0); + atomic64_set(&entry->back_cnt, 0); +} + +/* Expose PTP monitor snapshot via debugfs. 
*/ +static ssize_t read_file_ptp_mon(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath9k_ptp_mon_entry *e; + char buf[384]; + int len; + + e = &sc->ptp_mon.tsf64; + len = scnprintf(buf, sizeof(buf), + "tsf64 last=%llu max_fwd=%llu max_back=%llu back_cnt=%lld\n", + (unsigned long long)READ_ONCE(e->last), + (unsigned long long)READ_ONCE(e->max_fwd), + (unsigned long long)READ_ONCE(e->max_back), + (long long)atomic64_read(&e->back_cnt)); + + e = &sc->ptp_mon.rx_ext; + len += scnprintf(buf + len, sizeof(buf) - len, + "rx_ext last=%llu max_fwd=%llu max_back=%llu back_cnt=%lld\n", + (unsigned long long)READ_ONCE(e->last), + (unsigned long long)READ_ONCE(e->max_fwd), + (unsigned long long)READ_ONCE(e->max_back), + (long long)atomic64_read(&e->back_cnt)); + + e = &sc->ptp_mon.phc_ns; + len += scnprintf(buf + len, sizeof(buf) - len, + "phc_ns last=%llu max_fwd=%llu max_back=%llu back_cnt=%lld\n", + (unsigned long long)READ_ONCE(e->last), + (unsigned long long)READ_ONCE(e->max_fwd), + (unsigned long long)READ_ONCE(e->max_back), + (long long)atomic64_read(&e->back_cnt)); - memcpy(buf, &sc->ptp_dirtyts, sizeof buf); - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } -static ssize_t write_file_dirtyts(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) { - struct ath_softc *sc = file->private_data; - u8 buf[sizeof(u64)]; - ssize_t len; - u64 dirty_cycle; - u32 raw_tsf; - s64 delta; - s64 dirty_ns; - unsigned long flags; - u32 remain; - - len = simple_write_to_buffer(buf, sizeof buf, ppos, user_buf, count); - if (len < 0) { - return len; - } - if (len < sizeof buf) { - return -EINVAL; - } +/* Reset PTP monitor counters via debugfs. 
*/ +static ssize_t write_file_ptp_mon(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; - memcpy(&dirty_cycle, buf, sizeof buf); - raw_tsf = (dirty_cycle >> 32); - remain = dirty_cycle & 0xffffffffU; - - spin_lock_irqsave(&sc->systim_lock, flags); - dirty_ns = timecounter_cyc2time(&sc->tc, raw_tsf); - delta = 0; - if (remain) { - u64 frac = 0; - delta = cyclecounter_cyc2ns(&sc->cc, 1, sc->cc.mask, &frac); - delta = delta * remain / 1000; - } - spin_unlock_irqrestore(&sc->systim_lock, flags); + (void)user_buf; + (void)ppos; + + ath9k_ptp_mon_reset_entry(&sc->ptp_mon.tsf64); + ath9k_ptp_mon_reset_entry(&sc->ptp_mon.rx_ext); + ath9k_ptp_mon_reset_entry(&sc->ptp_mon.phc_ns); + + return count; +} + +static const struct file_operations fops_ptp_mon = { + .read = read_file_ptp_mon, + .write = write_file_ptp_mon, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose timecounter snapshot via debugfs. 
*/ +static ssize_t read_file_ptp_tc(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[256]; + unsigned long flags; + u64 cycle_last; + u64 nsec; + u64 frac; + u64 mask; + u32 cc_mult; + u32 cc_shift; + u64 cc_mask; + int len; + + spin_lock_irqsave(&sc->systim_lock, flags); + cycle_last = sc->tc.cycle_last; + nsec = sc->tc.nsec; + frac = sc->tc.frac; + mask = sc->tc.mask; + cc_mult = sc->cc.mult; + cc_shift = sc->cc.shift; + cc_mask = sc->cc.mask; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + len = scnprintf(buf, sizeof(buf), + "cycle_last=%llu nsec=%llu frac=%llu mask=%#llx cc_mult=%u cc_shift=%u cc_mask=%#llx\n", + (unsigned long long)cycle_last, + (unsigned long long)nsec, + (unsigned long long)frac, + (unsigned long long)mask, + cc_mult, + cc_shift, + (unsigned long long)cc_mask); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tc = { + .read = read_file_ptp_tc, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose last timecounter read trace via debugfs. 
*/ +static ssize_t read_file_ptp_tc_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[256]; + unsigned long flags; + u64 now_ns; + u64 cycle_last; + u64 nsec; + u64 frac; + u64 mask; + u32 cc_mult; + u32 cc_shift; + u64 cc_mask; + int len; + + spin_lock_irqsave(&sc->systim_lock, flags); + now_ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_GETTIME); + cycle_last = sc->tc.cycle_last; + nsec = sc->tc.nsec; + frac = sc->tc.frac; + mask = sc->tc.mask; + cc_mult = sc->cc.mult; + cc_shift = sc->cc.shift; + cc_mask = sc->cc.mask; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + len = scnprintf(buf, sizeof(buf), + "now_ns=%llu cycle_last=%llu nsec=%llu frac=%llu mask=%#llx cc_mult=%u cc_shift=%u cc_mask=%#llx\n", + (unsigned long long)now_ns, + (unsigned long long)cycle_last, + (unsigned long long)nsec, + (unsigned long long)frac, + (unsigned long long)mask, + cc_mult, + cc_shift, + (unsigned long long)cc_mask); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tc_read = { + .read = read_file_ptp_tc_read, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Read current timecounter shift value. */ +static ssize_t read_file_ptp_cc_shift(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[64]; + unsigned long flags; + u32 cc_shift; + u32 cc_mult; + u32 cc_base; + int len; + + spin_lock_irqsave(&sc->systim_lock, flags); + cc_shift = sc->cc.shift; + cc_mult = sc->cc.mult; + cc_base = sc->cc_mult; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + len = scnprintf(buf, sizeof(buf), "shift=%u cc_base=%u cc_mult=%u\n", + cc_shift, cc_base, cc_mult); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +/* Update timecounter shift and rescale the timecounter. 
*/ +static ssize_t write_file_ptp_cc_shift(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + unsigned long shift; + unsigned long flags; + ssize_t ret; + u64 ns; + u64 pre_tc_nsec; + u64 pre_cycle_last; + u64 pre_frac; + + ret = kstrtoul_from_user(user_buf, count, 0, &shift); + if (ret) + return ret; + if (shift > 31) + return -EINVAL; + + spin_lock_irqsave(&sc->systim_lock, flags); + /* Keep the PHC time continuous across the shift change. */ + ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_GETTIME); + pre_tc_nsec = sc->tc.nsec; + pre_cycle_last = sc->tc.cycle_last; + pre_frac = sc->tc.frac; + sc->cc.shift = shift; + sc->cc_mult = clocksource_khz2mult(1000, sc->cc.shift); + sc->ptp_last_adjfine_pre_mult = sc->cc.mult; + sc->cc.mult = adjust_by_scaled_ppm((u64)sc->cc_mult, + sc->ptp_last_adjfine); + sc->ptp_last_adjfine_post_mult = sc->cc.mult; + timecounter_init(&sc->tc, &sc->cc, ns); + ath9k_ptp_tc_mutation_record(sc, ATH9K_PTP_TC_MUT_CC_SHIFT, + ns, 0, shift, + pre_tc_nsec, pre_cycle_last, pre_frac, + sc->tc.nsec, sc->tc.cycle_last, + sc->tc.frac); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + return count; +} + +static const struct file_operations fops_ptp_cc_shift = { + .read = read_file_ptp_cc_shift, + .write = write_file_ptp_cc_shift, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Debugfs hook to set TSF manually for testing. 
*/ +static ssize_t write_file_ptp_settsf(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + u64 tsf; + ssize_t ret; + + ret = kstrtoull_from_user(user_buf, count, 0, &tsf); + if (ret) + return ret; + + if (!READ_ONCE(sc->ptp_settsf_enable)) + return -EPERM; + + mutex_lock(&sc->mutex); + ath9k_ps_wakeup(sc); + if (sc->cur_chan) { + ktime_get_raw_ts64(&sc->cur_chan->tsf_ts); + sc->cur_chan->tsf_val = tsf; + } + ath9k_hw_settsf64(sc->sc_ah, tsf); + ath9k_ps_restore(sc); + mutex_unlock(&sc->mutex); + + return count; +} + +static const struct file_operations fops_ptp_settsf = { + .write = write_file_ptp_settsf, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose last adjtime/adjfine details via debugfs. */ +static ssize_t read_file_ptp_adj(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[256]; + unsigned long flags; + s64 adjtime; + u64 adjtime_ns; + u64 adjtime_pre_ns; + u64 adjtime_post_ns; + s64 adjtime_expected_ns; + u64 adjtime_cycle; + long adjfine; + u64 adjfine_ns; + u64 adjfine_pre_mult; + u64 adjfine_post_mult; + u32 cc_mult; + u32 cc_base; + int len; + + spin_lock_irqsave(&sc->systim_lock, flags); + adjtime = sc->ptp_last_adjtime; + adjtime_ns = sc->ptp_last_adjtime_ns; + adjtime_pre_ns = sc->ptp_last_adjtime_pre_ns; + adjtime_post_ns = sc->ptp_last_adjtime_post_ns; + adjtime_expected_ns = sc->ptp_last_adjtime_expected_ns; + adjtime_cycle = sc->ptp_last_adjtime_cycle; + adjfine = sc->ptp_last_adjfine; + adjfine_ns = sc->ptp_last_adjfine_ns; + adjfine_pre_mult = sc->ptp_last_adjfine_pre_mult; + adjfine_post_mult = sc->ptp_last_adjfine_post_mult; + cc_mult = sc->cc.mult; + cc_base = sc->cc_mult; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + len = scnprintf(buf, sizeof(buf), + "adjtime=%lld adjtime_ns=%llu adjtime_pre_ns=%llu adjtime_post_ns=%llu 
adjtime_expected_ns=%lld adjtime_cycle=%llu adjfine=%ld adjfine_ns=%llu adjfine_pre_mult=%llu adjfine_post_mult=%llu cc_mult=%u cc_base=%u\n", + (long long)adjtime, + (unsigned long long)adjtime_ns, + (unsigned long long)adjtime_pre_ns, + (unsigned long long)adjtime_post_ns, + (long long)adjtime_expected_ns, + (unsigned long long)adjtime_cycle, + adjfine, + (unsigned long long)adjfine_ns, + (unsigned long long)adjfine_pre_mult, + (unsigned long long)adjfine_post_mult, + cc_mult, + cc_base); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_adj = { + .read = read_file_ptp_adj, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const char *ath9k_ptp_parse_reason_name(u8 reason) +{ + switch (reason) { + case ATH9K_PTP_PARSE_OK: + return "ok"; + case ATH9K_PTP_PARSE_NULL_SKB: + return "null_skb"; + case ATH9K_PTP_PARSE_SHORT_HDR: + return "short_hdr"; + case ATH9K_PTP_PARSE_NONDATA: + return "nondata"; + case ATH9K_PTP_PARSE_SHORT_PAYLOAD: + return "short_payload"; + case ATH9K_PTP_PARSE_NO_SNAP: + return "no_snap"; + case ATH9K_PTP_PARSE_SNAP_SHORT: + return "snap_short"; + case ATH9K_PTP_PARSE_L2_PTP: + return "l2_ptp"; + case ATH9K_PTP_PARSE_IPV4_SHORT: + return "ipv4_short"; + case ATH9K_PTP_PARSE_IPV4_NOT_UDP: + return "ipv4_not_udp"; + case ATH9K_PTP_PARSE_IPV4_NOT_PTP_PORT: + return "ipv4_not_ptp_port"; + case ATH9K_PTP_PARSE_IPV6_SHORT: + return "ipv6_short"; + case ATH9K_PTP_PARSE_IPV6_NOT_UDP: + return "ipv6_not_udp"; + case ATH9K_PTP_PARSE_IPV6_NOT_PTP_PORT: + return "ipv6_not_ptp_port"; + case ATH9K_PTP_PARSE_UNKNOWN_ETHERTYPE: + return "unknown_ethertype"; + case ATH9K_PTP_PARSE_PTP_SHORT: + return "ptp_short"; + default: + return "unknown"; + } +} + +static ssize_t read_file_ptp_rx_parse(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1024]; + u8 
bytes[ATH9K_PTP_RX_PARSE_BYTES]; + u8 bytes_len; + u8 reason; + int len; + int i; + + reason = READ_ONCE(sc->ptp_rx_parse_last_reason); + bytes_len = READ_ONCE(sc->ptp_rx_parse_last_bytes_len); + if (bytes_len > ATH9K_PTP_RX_PARSE_BYTES) + bytes_len = ATH9K_PTP_RX_PARSE_BYTES; + memcpy(bytes, sc->ptp_rx_parse_last_bytes, bytes_len); + + len = scnprintf(buf, sizeof(buf), + "attempt=%llu miss=%llu reason=%u reason_name=%s " + "msgtype=%u seq=%u skb_len=%u fc=0x%04x seq_ctrl=0x%04x " + "hdrlen=%u payload_len=%u snap_off=%u ethertype=0x%04x " + "ipver=%u ipproto=%u ihl=%u sport=%u dport=%u " + "ptp_off=%u ptp_len=%u bytes_len=%u bytes=", + (unsigned long long)READ_ONCE(sc->ptp_rx_parse_attempt_cnt), + (unsigned long long)READ_ONCE(sc->ptp_rx_parse_miss_cnt), + reason, + ath9k_ptp_parse_reason_name(reason), + READ_ONCE(sc->ptp_rx_parse_last_msgtype), + READ_ONCE(sc->ptp_rx_parse_last_seqid), + READ_ONCE(sc->ptp_rx_parse_last_skb_len), + READ_ONCE(sc->ptp_rx_parse_last_fc), + READ_ONCE(sc->ptp_rx_parse_last_seq_ctrl), + READ_ONCE(sc->ptp_rx_parse_last_hdrlen), + READ_ONCE(sc->ptp_rx_parse_last_payload_len), + READ_ONCE(sc->ptp_rx_parse_last_snap_off), + READ_ONCE(sc->ptp_rx_parse_last_ethertype), + READ_ONCE(sc->ptp_rx_parse_last_ip_version), + READ_ONCE(sc->ptp_rx_parse_last_ip_proto), + READ_ONCE(sc->ptp_rx_parse_last_ihl), + READ_ONCE(sc->ptp_rx_parse_last_sport), + READ_ONCE(sc->ptp_rx_parse_last_dport), + READ_ONCE(sc->ptp_rx_parse_last_ptp_off), + READ_ONCE(sc->ptp_rx_parse_last_ptp_len), + bytes_len); + + for (i = 0; i < bytes_len && len < sizeof(buf) - 4; i++) + len += scnprintf(buf + len, sizeof(buf) - len, "%02x", bytes[i]); + + len += scnprintf(buf + len, sizeof(buf) - len, "\n"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_rx_parse = { + .read = read_file_ptp_rx_parse, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t 
read_file_ptp_sta_hidden_step(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[512]; + int len; + + len = scnprintf(buf, sizeof(buf), + "cnt=%llu thresh_ns=%u last_host_ns=%llu last_tsf=%llu " + "delta_host_ns=%lld delta_tsf_ns=%lld corr_ns=%lld " + "abs_ns=%llu last_reanchor_ns=%llu " + "backwards_cnt=%llu backwards_delta_host_ns=%lld " + "backwards_delta_tsf_ns=%lld\n", + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_cnt), + READ_ONCE(sc->ptp_sta_hidden_step_thresh_ns), + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_last_host_ns), + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_last_tsf), + (long long)READ_ONCE(sc->ptp_sta_hidden_step_last_delta_host_ns), + (long long)READ_ONCE(sc->ptp_sta_hidden_step_last_delta_tsf_ns), + (long long)READ_ONCE(sc->ptp_sta_hidden_step_last_corr_ns), + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_last_abs_ns), + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_last_reanchor_ns), + (unsigned long long)READ_ONCE(sc->ptp_sta_hidden_step_backwards_cnt), + (long long)READ_ONCE(sc->ptp_sta_hidden_step_backwards_delta_host_ns), + (long long)READ_ONCE(sc->ptp_sta_hidden_step_backwards_delta_tsf_ns)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_sta_hidden_step = { + .read = read_file_ptp_sta_hidden_step, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose last RX timestamp sample and counters via debugfs. 
*/ +static ssize_t read_file_ptp_rx_ts(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[640]; + u64 tsf64; + u32 tstamp; + u64 mactime; + u64 hwtstamp_ns; + u64 rx_ext; + s64 rx_ext_delta; + s64 rx_ext_mactime_delta; + s64 rx_tsf_offset; + u32 rx_read_dur_ns; + u64 rx_rebase; + u64 rx_drop; + u64 epoch_drop; + u64 rx_hwts; + u64 rx_ptp_seen; + u64 rx_ptp_done; + u64 rx_ptp_miss; + u8 rx_last_msgtype; + u16 rx_last_seqid; + u64 rebase_drop; + s64 stall_rx_delta; + u64 stall_rx_cnt; + u8 stall_en; + u32 stall_thr; + int len; + + tsf64 = READ_ONCE(sc->ptp_last_rx_tsf64); + tstamp = READ_ONCE(sc->ptp_last_rx_tstamp); + mactime = READ_ONCE(sc->ptp_last_rx_mactime); + hwtstamp_ns = READ_ONCE(sc->ptp_last_rx_hwtstamp_ns); + rx_ext = READ_ONCE(sc->ptp_sample_rx.ext); + rx_read_dur_ns = READ_ONCE(sc->ptp_sample_rx.read_dur_ns); + rx_ext_delta = (s64)rx_ext - (s64)tsf64; + rx_ext_mactime_delta = (s64)rx_ext - (s64)mactime; + rx_tsf_offset = READ_ONCE(sc->ptp_last_rx_tsf_offset); + rx_rebase = READ_ONCE(sc->ptp_last_rx_rebase); + rx_drop = READ_ONCE(sc->ptp_rx_drop_cnt); + epoch_drop = READ_ONCE(sc->ptp_epoch_drop_cnt); + rx_hwts = READ_ONCE(sc->ptp_rx_hwts_cnt); + rx_ptp_seen = READ_ONCE(sc->ptp_rx_ptp_seen); + rx_ptp_done = READ_ONCE(sc->ptp_rx_hwts_done); + rx_ptp_miss = READ_ONCE(sc->ptp_rx_hwts_miss); + rx_last_msgtype = READ_ONCE(sc->ptp_last_rx_msgtype); + rx_last_seqid = READ_ONCE(sc->ptp_last_rx_seqid); + rebase_drop = READ_ONCE(sc->ptp_rebase_drop_cnt); + stall_rx_delta = READ_ONCE(sc->ptp_stall_last_rx_delta_ns); + stall_rx_cnt = READ_ONCE(sc->ptp_stall_rx_cnt); + stall_en = READ_ONCE(sc->ptp_stall_enable); + stall_thr = READ_ONCE(sc->ptp_stall_thresh_ns); + + len = scnprintf(buf, sizeof(buf), + "rx_tsf64=%llu rx_tstamp=%u rx_mactime=%llu " + "rx_ext=%llu rx_ext_delta=%lld rx_ext_mactime_delta=%lld " + "rx_read_dur_ns=%u rx_hwtstamp_ns=%llu rx_tsf_offset=%lld rx_rebase=%llu " + 
"rx_drop=%llu epoch_drop=%llu rx_hwts=%llu " + "rx_ptp_seen=%llu rx_ptp_done=%llu rx_ptp_miss=%llu " + "rx_last_msgtype=%u rx_last_seq=%u " + "rebase_drop=%llu stall_en=%u stall_thr_ns=%u " + "stall_rx_delta_ns=%lld stall_rx_cnt=%llu\n", + (unsigned long long)tsf64, + tstamp, + (unsigned long long)mactime, + (unsigned long long)rx_ext, + (long long)rx_ext_delta, + (long long)rx_ext_mactime_delta, + rx_read_dur_ns, + (unsigned long long)hwtstamp_ns, + (long long)rx_tsf_offset, + (unsigned long long)rx_rebase, + (unsigned long long)rx_drop, + (unsigned long long)epoch_drop, + (unsigned long long)rx_hwts, + (unsigned long long)rx_ptp_seen, + (unsigned long long)rx_ptp_done, + (unsigned long long)rx_ptp_miss, + rx_last_msgtype, + rx_last_seqid, + (unsigned long long)rebase_drop, + stall_en, + stall_thr, + (long long)stall_rx_delta, + (unsigned long long)stall_rx_cnt); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_rx_ts = { + .read = read_file_ptp_rx_ts, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose last TX timestamp sample and counters via debugfs. 
*/ +static ssize_t read_file_ptp_tx_ts(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1024]; + u64 tsf64; + u32 tstamp; + u32 duration; + u32 tx_read_dur_ns; + u64 hwtstamp_ns; + u64 tx_ext; + s64 tx_tsf_offset; + u64 tx_rebase; + u64 ptp_seen; + u64 ts_req; + u64 ts_done; + u64 ts_miss; + u64 noack_cnt; + u64 suspect_cnt; + u64 suspect_hwtstamp_ns; + u32 suspect_tstamp; + u8 tx_last_msgtype; + u8 noack_enable; + u8 noack_msgtype; + u8 suspect_msgtype; + u8 suspect_rateindex; + u8 suspect_shortretry; + u8 suspect_longretry; + u8 suspect_status; + u8 suspect_flags; + u16 tx_last_seqid; + u16 noack_seqid; + u16 suspect_seqid; + s64 stall_tx_delta; + u64 stall_tx_cnt; + u8 stall_en; + u32 stall_thr; + int len; + + tsf64 = READ_ONCE(sc->ptp_sample_tx.tsf64); + tstamp = READ_ONCE(sc->ptp_sample_tx.tstamp); + duration = READ_ONCE(sc->ptp_sample_tx.duration); + tx_read_dur_ns = READ_ONCE(sc->ptp_sample_tx.read_dur_ns); + tx_ext = READ_ONCE(sc->ptp_sample_tx.ext); + hwtstamp_ns = READ_ONCE(sc->ptp_last_tx_hwtstamp_ns); + tx_tsf_offset = READ_ONCE(sc->ptp_last_tx_tsf_offset); + tx_rebase = READ_ONCE(sc->ptp_last_tx_rebase); + ptp_seen = READ_ONCE(sc->ptp_tx_ptp_seen); + ts_req = READ_ONCE(sc->ptp_tx_tstamp_req); + ts_done = READ_ONCE(sc->ptp_tx_tstamp_done); + ts_miss = READ_ONCE(sc->ptp_tx_tstamp_miss); + noack_enable = READ_ONCE(sc->ptp_noack_ptp_event_enable); + noack_cnt = READ_ONCE(sc->ptp_tx_noack_event_cnt); + noack_msgtype = READ_ONCE(sc->ptp_tx_noack_last_msgtype); + noack_seqid = READ_ONCE(sc->ptp_tx_noack_last_seqid); + suspect_cnt = READ_ONCE(sc->ptp_tx_suspect_status_cnt); + suspect_msgtype = READ_ONCE(sc->ptp_tx_suspect_status_last_msgtype); + suspect_seqid = READ_ONCE(sc->ptp_tx_suspect_status_last_seqid); + suspect_rateindex = READ_ONCE(sc->ptp_tx_suspect_status_last_rateindex); + suspect_shortretry = READ_ONCE(sc->ptp_tx_suspect_status_last_shortretry); + suspect_longretry = 
READ_ONCE(sc->ptp_tx_suspect_status_last_longretry); + suspect_status = READ_ONCE(sc->ptp_tx_suspect_status_last_status); + suspect_flags = READ_ONCE(sc->ptp_tx_suspect_status_last_flags); + suspect_tstamp = READ_ONCE(sc->ptp_tx_suspect_status_last_tstamp); + suspect_hwtstamp_ns = READ_ONCE(sc->ptp_tx_suspect_status_last_hwtstamp_ns); + tx_last_msgtype = READ_ONCE(sc->ptp_last_tx_msgtype); + tx_last_seqid = READ_ONCE(sc->ptp_last_tx_seqid); + stall_tx_delta = READ_ONCE(sc->ptp_stall_last_tx_delta_ns); + stall_tx_cnt = READ_ONCE(sc->ptp_stall_tx_cnt); + stall_en = READ_ONCE(sc->ptp_stall_enable); + stall_thr = READ_ONCE(sc->ptp_stall_thresh_ns); + + len = scnprintf(buf, sizeof(buf), + "tx_tsf64=%llu tx_tstamp=%u tx_duration=%u tx_read_dur_ns=%u " + "tx_ext=%llu tx_hwtstamp_ns=%llu " + "tx_tsf_offset=%lld tx_rebase=%llu " + "tx_ptp_seen=%llu tx_tstamp_req=%llu " + "tx_tstamp_done=%llu tx_tstamp_miss=%llu " + "tx_last_msgtype=%u tx_last_seq=%u " + "noack_event_enable=%u noack_event_cnt=%llu " + "noack_last_msgtype=%u noack_last_seq=%u " + "suspect_status_cnt=%llu " + "suspect_status_last_msgtype=%u suspect_status_last_seq=%u " + "suspect_status_last_rateidx=%u suspect_status_last_shortretry=%u " + "suspect_status_last_longretry=%u suspect_status_last_status=0x%02x " + "suspect_status_last_flags=0x%02x suspect_status_last_tstamp=%u " + "suspect_status_last_hwtstamp_ns=%llu " + "stall_en=%u stall_thr_ns=%u " + "stall_tx_delta_ns=%lld stall_tx_cnt=%llu\n", + (unsigned long long)tsf64, + tstamp, + duration, + tx_read_dur_ns, + (unsigned long long)tx_ext, + (unsigned long long)hwtstamp_ns, + (long long)tx_tsf_offset, + (unsigned long long)tx_rebase, + (unsigned long long)ptp_seen, + (unsigned long long)ts_req, + (unsigned long long)ts_done, + (unsigned long long)ts_miss, + tx_last_msgtype, + tx_last_seqid, + noack_enable, + (unsigned long long)noack_cnt, + noack_msgtype, + noack_seqid, + (unsigned long long)suspect_cnt, + suspect_msgtype, + suspect_seqid, + 
suspect_rateindex, + suspect_shortretry, + suspect_longretry, + suspect_status, + suspect_flags, + suspect_tstamp, + (unsigned long long)suspect_hwtstamp_ns, + stall_en, + stall_thr, + (long long)stall_tx_delta, + (unsigned long long)stall_tx_cnt); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tx_ts = { + .read = read_file_ptp_tx_ts, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Aggregate HW timestamp misses (RX + pending TX). */ +static ssize_t read_file_ptp_hwts_miss(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[128]; + u64 rx_miss = READ_ONCE(sc->ptp_rx_hwts_miss); + u64 tx_req = READ_ONCE(sc->ptp_tx_tstamp_req); + u64 tx_done = READ_ONCE(sc->ptp_tx_tstamp_done); + u64 tx_miss = (tx_req > tx_done) ? (tx_req - tx_done) : 0; + u64 total = rx_miss + tx_miss; + int len; + + len = scnprintf(buf, sizeof(buf), + "rx_miss=%llu tx_pending=%llu total=%llu\n", + (unsigned long long)rx_miss, + (unsigned long long)tx_miss, + (unsigned long long)total); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_hwts_miss = { + .read = read_file_ptp_hwts_miss, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Aggregate settsf drops vs. passes. */ +static ssize_t read_file_ptp_settsf_dur_drop(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[192]; + u64 drop = READ_ONCE(sc->ptp_settsf_dur_drop_cnt); + u64 pass = READ_ONCE(sc->ptp_settsf_dur_pass_cnt); + u64 total = drop + pass; + u64 pct_x100 = total ? 
div64_u64(drop * 10000ULL, total) : 0; + u64 pct_int = pct_x100 / 100; + u64 pct_frac = pct_x100 % 100; + u64 guard_block = READ_ONCE(sc->ptp_guard_block_cnt); + u64 guard_timeout = READ_ONCE(sc->ptp_guard_timeout_cnt); + int len; + + len = scnprintf(buf, sizeof(buf), + "drop=%llu pass=%llu total=%llu drop_pct=%llu.%02llu " + "guard_block=%llu guard_timeout=%llu\n", + (unsigned long long)drop, + (unsigned long long)pass, + (unsigned long long)total, + (unsigned long long)pct_int, + (unsigned long long)pct_frac, + (unsigned long long)guard_block, + (unsigned long long)guard_timeout); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_settsf_dur_drop = { + .read = read_file_ptp_settsf_dur_drop, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* TSF register access counters. */ +static ssize_t read_file_ptp_tsf_access(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + char buf[256]; + int len; + + len = scnprintf(buf, sizeof(buf), + "get64=%llu get32=%llu set=%llu reset=%llu " + "cc_read=%llu aic_read=%llu\n", + (unsigned long long)atomic64_read(&ah->ptp_tsf_get64_cnt), + (unsigned long long)atomic64_read(&ah->ptp_tsf_get32_cnt), + (unsigned long long)atomic64_read(&ah->ptp_tsf_set_cnt), + (unsigned long long)atomic64_read(&ah->ptp_tsf_reset_cnt), + (unsigned long long)atomic64_read(&ah->ptp_tsf_cc_read_cnt), + (unsigned long long)atomic64_read(&ah->ptp_tsf_aic_read_cnt)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tsf_access = { + .read = read_file_ptp_tsf_access, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const char *ath9k_ptp_msgtype_name(u8 t) +{ + switch (t) { + case 0: + return "Sync"; + case 1: + return "Delay_Req"; + case 2: + return 
"Pdelay_Req"; + case 3: + return "Pdelay_Resp"; + case 8: + return "Follow_Up"; + case 9: + return "Delay_Resp"; + case 10: + return "Pdelay_Resp_FU"; + case 11: + return "Announce"; + case 12: + return "Signaling"; + case 13: + return "Management"; + default: + return "Other"; + } +} + +static ssize_t read_file_ptp_rx_types(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1536]; + int len = 0; + int i; + + len += scnprintf(buf + len, sizeof(buf) - len, + "last_msgtype=%u last_seq=%u\n", + READ_ONCE(sc->ptp_last_rx_msgtype), + READ_ONCE(sc->ptp_last_rx_seqid)); + for (i = 0; i < ATH9K_PTP_MSGTYPE_MAX; i++) { + len += scnprintf(buf + len, sizeof(buf) - len, + "type=%d name=%s seen=%llu done=%llu miss=%llu\n", + i, ath9k_ptp_msgtype_name(i), + (unsigned long long)READ_ONCE(sc->ptp_rx_type_seen[i]), + (unsigned long long)READ_ONCE(sc->ptp_rx_type_done[i]), + (unsigned long long)READ_ONCE(sc->ptp_rx_type_miss[i])); + if (len >= (int)sizeof(buf)) + break; + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_rx_types = { + .read = read_file_ptp_rx_types, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_ptp_tx_types(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1536]; + int len = 0; + int i; + + len += scnprintf(buf + len, sizeof(buf) - len, + "last_msgtype=%u last_seq=%u\n", + READ_ONCE(sc->ptp_last_tx_msgtype), + READ_ONCE(sc->ptp_last_tx_seqid)); + for (i = 0; i < ATH9K_PTP_MSGTYPE_MAX; i++) { + len += scnprintf(buf + len, sizeof(buf) - len, + "type=%d name=%s seen=%llu req=%llu done=%llu miss=%llu\n", + i, ath9k_ptp_msgtype_name(i), + (unsigned long long)READ_ONCE(sc->ptp_tx_type_seen[i]), + (unsigned long long)READ_ONCE(sc->ptp_tx_type_req[i]), + (unsigned long 
long)READ_ONCE(sc->ptp_tx_type_done[i]), + (unsigned long long)READ_ONCE(sc->ptp_tx_type_miss[i])); + if (len >= (int)sizeof(buf)) + break; + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tx_types = { + .read = read_file_ptp_tx_types, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; - dirty_ns += delta; - sc->ptp_dirtyts = dirty_ns; +static ssize_t read_file_ptp_settsf_last_jump(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[64]; + int len; + + len = scnprintf(buf, sizeof(buf), "%lld\n", + (long long)READ_ONCE(sc->ptp_settsf_last_jump_ns)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_settsf_last_jump = { + .read = read_file_ptp_settsf_last_jump, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose wrap extender and rebase state via debugfs. 
*/ +static ssize_t read_file_ptp_wrap(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + u32 rx_last = READ_ONCE(sc->ptp_rx_wrap_last); + u64 rx_ext = READ_ONCE(sc->ptp_rx_wrap_ext); + u8 rx_valid = READ_ONCE(sc->ptp_rx_wrap_valid); + u32 tx_last = READ_ONCE(sc->ptp_tx_wrap_last); + u64 tx_ext = READ_ONCE(sc->ptp_tx_wrap_ext); + u8 tx_valid = READ_ONCE(sc->ptp_tx_wrap_valid); + u64 rebases = READ_ONCE(sc->ptp_wrap_rebase_cnt); + u64 rebase_tsf = READ_ONCE(sc->ptp_wrap_rebase_tsf); + u64 rx_wraps = rx_ext >> 32; + u64 tx_wraps = tx_ext >> 32; + u64 raw_tsf = ath9k_hw_gettsf64(sc->sc_ah); + u32 raw_low = (u32)raw_tsf; + u32 raw_last; + u64 raw_wraps; + u8 raw_valid; + s64 tsf_offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + s64 rebase_old_offset_ns = READ_ONCE(sc->ptp_rebase_old_offset_ns); + s64 rebase_new_offset_ns = READ_ONCE(sc->ptp_rebase_new_offset_ns); + s64 rebase_anchor_rem_ns = READ_ONCE(sc->ptp_rebase_anchor_rem_ns); + u32 rebase_drop_ns = READ_ONCE(sc->ptp_rebase_drop_ns); + u64 rebase_drop_until_ns = READ_ONCE(sc->ptp_rebase_drop_until_ns); + u64 rebase_drop_cnt = READ_ONCE(sc->ptp_rebase_drop_cnt); + u64 phc_tsf = (u64)((s64)raw_tsf + tsf_offset); + char buf[768]; + int len; + unsigned long flags; + + spin_lock_irqsave(&sc->systim_lock, flags); + if (sc->ptp_raw_tsf_rebase_seen != rebases) { + sc->ptp_raw_tsf_rebase_seen = rebases; + sc->ptp_raw_tsf_valid = 0; + } + + if (sc->ptp_raw_tsf_valid) { + if (raw_low < sc->ptp_raw_tsf_last) + sc->ptp_raw_tsf_wraps++; + } else { + sc->ptp_raw_tsf_valid = 1; + } + + sc->ptp_raw_tsf_last = raw_low; + raw_last = sc->ptp_raw_tsf_last; + raw_wraps = sc->ptp_raw_tsf_wraps; + raw_valid = sc->ptp_raw_tsf_valid; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + len = scnprintf(buf, sizeof(buf), + "rx valid=%u last=0x%08x ext=%llu wraps=%llu\n" + "tx valid=%u last=0x%08x ext=%llu wraps=%llu\n" + "rebases=%llu last_rebase_tsf=%llu\n" + 
"rebase_old_offset_ns=%lld rebase_new_offset_ns=%lld " + "rebase_anchor_rem_ns=%lld\n" + "rebase_drop_ns=%u rebase_drop_until_ns=%llu " + "rebase_drop_cnt=%llu\n" + "raw_tsf=%llu raw_low=0x%08x raw_last=0x%08x " + "raw_wraps=%llu raw_valid=%u\n" + "phc_tsf=%llu tsf_offset=%lld\n", + rx_valid, rx_last, + (unsigned long long)rx_ext, + (unsigned long long)rx_wraps, + tx_valid, tx_last, + (unsigned long long)tx_ext, + (unsigned long long)tx_wraps, + (unsigned long long)rebases, + (unsigned long long)rebase_tsf, + (long long)rebase_old_offset_ns, + (long long)rebase_new_offset_ns, + (long long)rebase_anchor_rem_ns, + rebase_drop_ns, + (unsigned long long)rebase_drop_until_ns, + (unsigned long long)rebase_drop_cnt, + (unsigned long long)raw_tsf, + raw_low, + raw_last, + (unsigned long long)raw_wraps, + raw_valid, + (unsigned long long)phc_tsf, + (long long)tsf_offset); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_wrap = { + .read = read_file_ptp_wrap, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Map evtlog event id to a string. */ +static const char *ath9k_ptp_evtlog_name(u8 event) +{ + switch (event) { + case ATH9K_PTP_EVT_RX: + return "rx"; + case ATH9K_PTP_EVT_TX: + return "tx"; + case ATH9K_PTP_EVT_REBASE: + return "rebase"; + case ATH9K_PTP_EVT_ADJTIME: + return "adjtime"; + case ATH9K_PTP_EVT_ADJFINE: + return "adjfine"; + case ATH9K_PTP_EVT_SETTIME: + return "settime"; + case ATH9K_PTP_EVT_SETTSF: + return "settsf"; + case ATH9K_PTP_EVT_CC_GLITCH: + return "cc_glitch"; + case ATH9K_PTP_EVT_RESET_TSF: + return "resettsf"; + case ATH9K_PTP_EVT_PHC_GET: + return "phc_get"; + default: + return "unk"; + } +} + +/* Map tc trace event id to a string. 
*/ +static const char *ath9k_ptp_tc_trace_event_name(u8 event) +{ + switch (event) { + case ATH9K_PTP_TC_TRACE_CYC2TIME: + return "cyc2time"; + case ATH9K_PTP_TC_TRACE_READ: + return "read"; + case ATH9K_PTP_TC_TRACE_SET_CYCLE: + return "set_cycle"; + default: + return "unk"; + } +} + +/* Map tc trace reason id to a string. */ +static const char *ath9k_ptp_tc_trace_reason_name(u8 reason) +{ + switch (reason) { + case ATH9K_PTP_TC_TRACE_RSN_RX: + return "rx"; + case ATH9K_PTP_TC_TRACE_RSN_TX: + return "tx"; + case ATH9K_PTP_TC_TRACE_RSN_GETTIME: + return "gettime"; + case ATH9K_PTP_TC_TRACE_RSN_ADJTIME_PRE: + return "adjtime_pre"; + case ATH9K_PTP_TC_TRACE_RSN_ADJTIME_POST: + return "adjtime_post"; + case ATH9K_PTP_TC_TRACE_RSN_SETTIME_PRE: + return "settime_pre"; + case ATH9K_PTP_TC_TRACE_RSN_SETTIME_POST: + return "settime_post"; + case ATH9K_PTP_TC_TRACE_RSN_ADJFINE: + return "adjfine"; + default: + return "none"; + } +} + +static const char *ath9k_ptp_tc_anomaly_source_name(u8 source) +{ + switch (source) { + case ATH9K_PTP_TC_ANOM_SRC_CC: + return "cc"; + case ATH9K_PTP_TC_ANOM_SRC_TC: + return "tc"; + default: + return "none"; + } +} + +static const char *ath9k_ptp_tc_mutation_source_name(u8 source) +{ + switch (source) { + case ATH9K_PTP_TC_MUT_INIT: + return "init"; + case ATH9K_PTP_TC_MUT_CC_SHIFT: + return "cc_shift"; + case ATH9K_PTP_TC_MUT_ADJTIME: + return "adjtime"; + case ATH9K_PTP_TC_MUT_SETTIME: + return "settime"; + default: + return "none"; + } +} + +/* Dump the PTP event ring buffer via debugfs. 
*/ +static ssize_t read_file_ptp_evtlog(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + const size_t buf_sz = 1048576; + char *buf; + size_t len = 0; + u32 seq; + u32 start; + u32 ring_len; + u32 ring_mask; + u32 i; + int ret; + + buf = kvmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + seq = (u32)atomic_read(&sc->ptp_evtlog_seq); + ring_len = READ_ONCE(sc->ptp_evtlog_len); + ring_mask = READ_ONCE(sc->ptp_evtlog_mask); + start = (ring_len && seq >= ring_len) ? + (seq - ring_len + 1) : 1; + + len += scnprintf(buf + len, buf_sz - len, + "seq=%u start=%u enable=%u len=%u\n", + seq, start, READ_ONCE(sc->ptp_evtlog_enable), + ring_len); + + if (!ring_len || !sc->ptp_evtlog) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; + } + + for (i = start; i <= seq && len < buf_sz - 1024; i++) { + struct ath9k_ptp_evtlog_entry *e; + u32 e_seq; + u8 event; + u32 tstamp; + u32 tstamp_last; + u32 tstamp_delta; + s32 tstamp_delta_s; + u32 duration; + u32 duration_mid; + u64 ext_prev; + u64 ext; + u64 hwtstamp_ns; + s64 tsf_offset; + u64 phc_tsf; + u64 wraps; + u64 rebases; + u64 adj_seq; + u8 wrap_valid; + u8 settsf_dur_hist_max; + u8 settsf_dur_hist_cnt; + u8 anchor_valid; + u64 anchor_phc; + u64 set_old_ns; + u64 set_new_ns; + s64 set_delta; + u64 settsf_old; + u64 settsf_new; + s64 settsf_delta; + u64 settsf_read_start_ns; + u64 settsf_read_end_ns; + u64 settsf_write_start_ns; + u64 settsf_write_end_ns; + u32 settsf_read_ns; + u32 settsf_write_ns; + u32 settsf_total_ns; + u32 settsf_epoch; + s64 settsf_epoch_offset_ns; + u8 settsf_epoch_valid; + u32 tsf_hi1; + u32 tsf_lo; + u32 tsf_hi2; + u64 cc_last; + u64 cc_new; + s64 cc_delta; + u64 sample_cycle; + u64 sample_ns; + u64 sample_rebase_cnt; + s64 sample_tsf_offset; + u32 sample_epoch; + u8 sample_epoch_valid; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 cc_mult; + u64 cc_mask; + u32 
cc_shift; + u8 ptp_msgtype; + u8 ptp_valid; + u16 ptp_seqid; + u64 ptp_ts_ns; + u8 ptp_ts_valid; + u16 hdr_len; + u16 payload_len; + u8 hdr[ATH9K_PTP_EVTLOG_HDR_LEN]; + u8 payload[ATH9K_PTP_EVTLOG_PAYLOAD_LEN]; + u8 stack_len; + unsigned long stack_entries[ATH9K_PTP_STACK_DEPTH]; + u32 caller_pid; + char caller_comm[TASK_COMM_LEN]; + char warn_buf[64]; + size_t warn_len = 0; + u16 hdr_cap; + u16 payload_cap; + int j; + + e = &sc->ptp_evtlog[i & ring_mask]; + e_seq = READ_ONCE(e->seq); + if (e_seq != i) + continue; + + event = READ_ONCE(e->event); + tstamp = READ_ONCE(e->tstamp); + tstamp_last = READ_ONCE(e->tstamp_last); + tstamp_delta = READ_ONCE(e->tstamp_delta); + tstamp_delta_s = READ_ONCE(e->tstamp_delta_s); + duration = READ_ONCE(e->duration); + duration_mid = READ_ONCE(e->duration_mid); + ext_prev = READ_ONCE(e->ext_prev); + ext = READ_ONCE(e->ext); + hwtstamp_ns = READ_ONCE(e->hwtstamp_ns); + tsf_offset = READ_ONCE(e->tsf_offset); + phc_tsf = READ_ONCE(e->phc_tsf); + wraps = READ_ONCE(e->wraps); + rebases = READ_ONCE(e->rebases); + adj_seq = READ_ONCE(e->adj_seq); + wrap_valid = READ_ONCE(e->wrap_valid); + settsf_dur_hist_max = READ_ONCE(e->settsf_dur_hist_max); + settsf_dur_hist_cnt = READ_ONCE(e->settsf_dur_hist_cnt); + anchor_valid = READ_ONCE(e->anchor_valid); + anchor_phc = READ_ONCE(e->anchor_phc); + set_old_ns = READ_ONCE(e->settime_old_ns); + set_new_ns = READ_ONCE(e->settime_new_ns); + set_delta = READ_ONCE(e->settime_delta); + settsf_old = READ_ONCE(e->settsf_old); + settsf_new = READ_ONCE(e->settsf_new); + settsf_delta = READ_ONCE(e->settsf_delta); + settsf_read_start_ns = READ_ONCE(e->settsf_read_start_ns); + settsf_read_end_ns = READ_ONCE(e->settsf_read_end_ns); + settsf_write_start_ns = READ_ONCE(e->settsf_write_start_ns); + settsf_write_end_ns = READ_ONCE(e->settsf_write_end_ns); + settsf_read_ns = READ_ONCE(e->settsf_read_ns); + settsf_write_ns = READ_ONCE(e->settsf_write_ns); + settsf_total_ns = READ_ONCE(e->settsf_total_ns); + settsf_epoch 
= READ_ONCE(e->settsf_epoch); + settsf_epoch_offset_ns = READ_ONCE(e->settsf_epoch_offset_ns); + settsf_epoch_valid = READ_ONCE(e->settsf_epoch_valid); + tsf_hi1 = READ_ONCE(e->tsf_hi1); + tsf_lo = READ_ONCE(e->tsf_lo); + tsf_hi2 = READ_ONCE(e->tsf_hi2); + cc_last = READ_ONCE(e->cc_last); + cc_new = READ_ONCE(e->cc_new); + cc_delta = READ_ONCE(e->cc_delta); + sample_cycle = READ_ONCE(e->sample_cycle); + sample_ns = READ_ONCE(e->sample_ns); + sample_rebase_cnt = READ_ONCE(e->sample_rebase_cnt); + sample_tsf_offset = READ_ONCE(e->sample_tsf_offset); + sample_epoch = READ_ONCE(e->sample_epoch); + sample_epoch_valid = READ_ONCE(e->sample_epoch_valid); + tc_cycle_last = READ_ONCE(e->tc_cycle_last); + tc_nsec = READ_ONCE(e->tc_nsec); + tc_frac = READ_ONCE(e->tc_frac); + cc_mult = READ_ONCE(e->cc_mult); + cc_mask = READ_ONCE(e->cc_mask); + cc_shift = READ_ONCE(e->cc_shift); + ptp_msgtype = READ_ONCE(e->ptp_msgtype); + ptp_valid = READ_ONCE(e->ptp_valid); + ptp_seqid = READ_ONCE(e->ptp_seqid); + ptp_ts_ns = READ_ONCE(e->ptp_ts_ns); + ptp_ts_valid = READ_ONCE(e->ptp_ts_valid); + hdr_len = READ_ONCE(e->hdr_len); + payload_len = READ_ONCE(e->payload_len); + memcpy(hdr, e->hdr, sizeof(hdr)); + memcpy(payload, e->payload, sizeof(payload)); + stack_len = READ_ONCE(e->stack_len); + if (stack_len > ATH9K_PTP_STACK_DEPTH) + stack_len = ATH9K_PTP_STACK_DEPTH; + if (stack_len) + memcpy(stack_entries, e->stack_entries, + stack_len * sizeof(stack_entries[0])); + caller_pid = READ_ONCE(e->caller_pid); + memcpy(caller_comm, e->caller_comm, sizeof(caller_comm)); + caller_comm[sizeof(caller_comm) - 1] = '\0'; + warn_buf[0] = '\0'; + + if (ext_prev && ext < ext_prev) + warn_len += scnprintf(warn_buf + warn_len, + sizeof(warn_buf) - warn_len, + "ext_regress "); + if (tstamp_delta > 0x10000000U) + warn_len += scnprintf(warn_buf + warn_len, + sizeof(warn_buf) - warn_len, + "delta_large "); + + len += scnprintf(buf + len, buf_sz - len, + "%u %s tsf64=%llu tstamp=0x%08x last=0x%08x " + "delta=%u 
sdelta=%d duration=%u " + "ext_prev=%llu ext=%llu hwtstamp_ns=%llu " + "tsf_offset=%lld phc_tsf=%llu wraps=%llu " + "rebases=%llu adj_seq=%llu wrap_valid=%u " + "anchor_valid=%u anchor_phc=%llu " + "set_old=%llu set_new=%llu set_delta=%lld " + "caller_pid=%u caller_comm=%s " + "sample_cycle=%llu sample_ns=%llu " + "sample_rebase_cnt=%llu " + "sample_tsf_offset=%lld " + "sample_epoch=%u sample_epoch_valid=%u " + "tc_cycle_last=%llu tc_nsec=%llu tc_frac=%llu " + "cc_mult=%llu cc_shift=%u cc_mask=%llu " + "ptp_valid=%u ptp_msg=%u ptp_seq=%u " + "ptp_ts_valid=%u ptp_ts_ns=%llu", + e_seq, + ath9k_ptp_evtlog_name(event), + (unsigned long long)READ_ONCE(e->tsf64), + tstamp, + tstamp_last, + tstamp_delta, + tstamp_delta_s, + duration, + (unsigned long long)ext_prev, + (unsigned long long)ext, + (unsigned long long)hwtstamp_ns, + (long long)tsf_offset, + (unsigned long long)phc_tsf, + (unsigned long long)wraps, + (unsigned long long)rebases, + (unsigned long long)adj_seq, + wrap_valid, + anchor_valid, + (unsigned long long)anchor_phc, + (unsigned long long)set_old_ns, + (unsigned long long)set_new_ns, + (long long)set_delta, + caller_pid, + caller_comm, + (unsigned long long)sample_cycle, + (unsigned long long)sample_ns, + (unsigned long long)sample_rebase_cnt, + (long long)sample_tsf_offset, + sample_epoch, + sample_epoch_valid, + (unsigned long long)tc_cycle_last, + (unsigned long long)tc_nsec, + (unsigned long long)tc_frac, + (unsigned long long)cc_mult, + cc_shift, + (unsigned long long)cc_mask, + ptp_valid, + ptp_msgtype, + ptp_seqid, + ptp_ts_valid, + (unsigned long long)ptp_ts_ns); + if (event == ATH9K_PTP_EVT_SETTSF || + event == ATH9K_PTP_EVT_RESET_TSF) + len += scnprintf(buf + len, buf_sz - len, + " settsf_old=%llu settsf_new=%llu " + "settsf_delta=%lld " + "settsf_read_ns=%u " + "settsf_write_ns=%u " + "settsf_total_ns=%u " + "settsf_read_start_ns=%llu " + "settsf_read_end_ns=%llu " + "settsf_write_start_ns=%llu " + "settsf_write_end_ns=%llu " + "settsf_epoch=%u 
" + "settsf_epoch_valid=%u " + "settsf_epoch_offset_ns=%lld", + (unsigned long long)settsf_old, + (unsigned long long)settsf_new, + (long long)settsf_delta, + settsf_read_ns, + settsf_write_ns, + settsf_total_ns, + (unsigned long long)settsf_read_start_ns, + (unsigned long long)settsf_read_end_ns, + (unsigned long long)settsf_write_start_ns, + (unsigned long long)settsf_write_end_ns, + settsf_epoch, + settsf_epoch_valid, + (long long)settsf_epoch_offset_ns); + if (event == ATH9K_PTP_EVT_REBASE) + len += scnprintf(buf + len, buf_sz - len, + " rebase_dur_ns=%u rebase_dur_mid_ns=%u " + "hist_max=%u hist_cnt=%u " + "rebase_old_offset_ns=%lld " + "rebase_new_offset_ns=%lld", + duration, + duration_mid, + settsf_dur_hist_max, + settsf_dur_hist_cnt, + (long long)set_delta, + (long long)settsf_delta); + if (event == ATH9K_PTP_EVT_CC_GLITCH) + len += scnprintf(buf + len, buf_sz - len, + " cc_last=%llu cc_new=%llu " + "cc_delta=%lld tsf_hi1=0x%08x " + "tsf_lo=0x%08x tsf_hi2=0x%08x", + (unsigned long long)cc_last, + (unsigned long long)cc_new, + (long long)cc_delta, + tsf_hi1, tsf_lo, tsf_hi2); + hdr_cap = min_t(u16, hdr_len, (u16)ATH9K_PTP_EVTLOG_HDR_LEN); + payload_cap = min_t(u16, payload_len, + (u16)ATH9K_PTP_EVTLOG_PAYLOAD_LEN); + if (hdr_len || payload_len) { + len += scnprintf(buf + len, buf_sz - len, + " hdr_len=%u payload_len=%u", + hdr_len, payload_len); + if (hdr_cap) { + len += scnprintf(buf + len, buf_sz - len, + " hdr="); + for (j = 0; j < hdr_cap && + len < buf_sz - 8; j++) + len += scnprintf(buf + len, + buf_sz - len, + "%02x", hdr[j]); + } + if (payload_cap) { + len += scnprintf(buf + len, buf_sz - len, + " payload="); + for (j = 0; j < payload_cap && + len < buf_sz - 8; j++) + len += scnprintf(buf + len, + buf_sz - len, + "%02x", + payload[j]); + } + } + if (stack_len) { + len += scnprintf(buf + len, buf_sz - len, + " stack_len=%u stack=", + stack_len); + for (j = 0; j < stack_len && + len < buf_sz - 64; j++) + len += scnprintf(buf + len, buf_sz - len, + 
"%s%pS", + j ? "," : "", + (void *)stack_entries[j]); + } + if (warn_len) + len += scnprintf(buf + len, buf_sz - len, + " warn=%s", warn_buf); + len += scnprintf(buf + len, buf_sz - len, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; +} + +static const struct file_operations fops_ptp_evtlog = { + .read = read_file_ptp_evtlog, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Dump the timecounter trace ring buffer via debugfs. */ +static ssize_t read_file_ptp_tc_trace(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + const size_t buf_sz = 524288; + char *buf; + size_t len = 0; + u32 seq; + u32 start; + u32 ring_len; + u32 ring_mask; + u32 i; + int ret; + + buf = kvmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + seq = (u32)atomic_read(&sc->ptp_tc_trace_seq); + ring_len = READ_ONCE(sc->ptp_tc_trace_len); + ring_mask = READ_ONCE(sc->ptp_tc_trace_mask); + start = (ring_len && seq >= ring_len) ? 
+ (seq - ring_len + 1) : 1; + + len += scnprintf(buf + len, buf_sz - len, + "seq=%u start=%u enable=%u len=%u\n", + seq, start, READ_ONCE(sc->ptp_tc_trace_enable), + ring_len); + + if (!ring_len || !sc->ptp_tc_trace) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; + } + + for (i = start; i <= seq && len < buf_sz - 256; i++) { + struct ath9k_ptp_tc_trace_entry *e; + u32 e_seq; + u8 event; + u8 reason; + u8 backward; + u64 ts_ns; + u64 cycle_in; + u64 cycle_last; + u64 nsec; + u64 frac; + u64 delta; + u64 ns_offset; + u64 ns_out; + u64 cc_mult; + u64 cc_mask; + u32 cc_shift; + u32 caller_pid; + char caller_comm[TASK_COMM_LEN]; + + e = &sc->ptp_tc_trace[i & ring_mask]; + e_seq = READ_ONCE(e->seq); + if (e_seq != i) + continue; + + event = READ_ONCE(e->event); + reason = READ_ONCE(e->reason); + backward = READ_ONCE(e->backward); + ts_ns = READ_ONCE(e->ts_ns); + cycle_in = READ_ONCE(e->cycle_in); + cycle_last = READ_ONCE(e->cycle_last); + nsec = READ_ONCE(e->nsec); + frac = READ_ONCE(e->frac); + delta = READ_ONCE(e->delta); + ns_offset = READ_ONCE(e->ns_offset); + ns_out = READ_ONCE(e->ns_out); + cc_mult = READ_ONCE(e->cc_mult); + cc_mask = READ_ONCE(e->cc_mask); + cc_shift = READ_ONCE(e->cc_shift); + caller_pid = READ_ONCE(e->caller_pid); + memcpy(caller_comm, e->caller_comm, sizeof(caller_comm)); + caller_comm[sizeof(caller_comm) - 1] = '\0'; + + len += scnprintf(buf + len, buf_sz - len, + "%u %s reason=%s backward=%u ts_ns=%llu " + "cycle_in=%llu cycle_last=%llu nsec=%llu " + "frac=%llu delta=%llu ns_offset=%llu " + "ns_out=%llu cc_mult=%llu cc_shift=%u " + "cc_mask=%llu caller_pid=%u caller_comm=%s\n", + e_seq, + ath9k_ptp_tc_trace_event_name(event), + ath9k_ptp_tc_trace_reason_name(reason), + backward, + (unsigned long long)ts_ns, + (unsigned long long)cycle_in, + (unsigned long long)cycle_last, + (unsigned long long)nsec, + (unsigned long long)frac, + (unsigned long long)delta, + (unsigned long long)ns_offset, + 
(unsigned long long)ns_out, + (unsigned long long)cc_mult, + cc_shift, + (unsigned long long)cc_mask, + caller_pid, + caller_comm); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; +} + +static const struct file_operations fops_ptp_tc_trace = { + .read = read_file_ptp_tc_trace, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Clear the timecounter trace buffer via debugfs. */ +static ssize_t write_file_ptp_tc_trace_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + + WRITE_ONCE(sc->ptp_tc_trace_enable, 0); + atomic_set(&sc->ptp_tc_trace_seq, 0); + if (sc->ptp_tc_trace && sc->ptp_tc_trace_len) + memset(sc->ptp_tc_trace, 0, + sc->ptp_tc_trace_len * sizeof(*sc->ptp_tc_trace)); + + return count; +} + +static const struct file_operations fops_ptp_tc_trace_reset = { + .write = write_file_ptp_tc_trace_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const char *ath9k_ptp_handoff_dir_name(u8 dir) +{ + switch (dir) { + case ATH9K_PTP_HANDOFF_RX: + return "rx"; + case ATH9K_PTP_HANDOFF_TX: + return "tx"; + default: + return "unknown"; + } +} + +/* Dump recent PTP events at the actual stack handoff point. */ +static ssize_t read_file_ptp_handoff(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + const size_t buf_sz = 262144; + char *buf; + size_t len = 0; + u32 seq; + u32 start; + u32 ring_len; + u32 ring_mask; + u32 i; + int ret; + + buf = kvmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + seq = (u32)atomic_read(&sc->ptp_handoff_seq); + ring_len = READ_ONCE(sc->ptp_handoff_len); + ring_mask = READ_ONCE(sc->ptp_handoff_mask); + start = (ring_len && seq >= ring_len) ? 
+ (seq - ring_len + 1) : 1; + + len += scnprintf(buf + len, buf_sz - len, + "seq=%u start=%u len=%u\n", + seq, start, ring_len); + + if (!ring_len || !sc->ptp_handoff) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; + } - return len; + for (i = start; i <= seq && len < buf_sz - 2048; i++) { + struct ath9k_ptp_handoff_entry *e; + u32 e_seq; + u8 dir; + u8 ptp_msgtype; + u8 epoch_valid; + u8 hwts_valid; + u16 ptp_seqid; + u16 frame_control; + u16 seq_ctrl; + u16 wlan_seq; + u16 frag; + u16 qos_control; + u8 retry; + u8 qos_valid; + u8 tid; + u8 skb_priority; + u8 skb_queue; + u8 tx_hw_queue; + u8 txq_qnum; + s8 txq_mac80211_qnum; + u8 tx_qid; + u8 tx_tid; + u8 tx_rateindex; + u8 tx_shortretry; + u8 tx_longretry; + s8 tx_rssi; + u16 tx_status; + u16 tx_flags; + u32 tx_info_flags; + u32 txq_depth; + u32 txq_ampdu_depth; + u32 txq_pending_frames; + u16 txq_aifs; + u16 txq_cwmin; + u16 txq_cwmax; + u32 txq_burst_time; + u32 txq_ready_time; + u8 bcnq_qnum; + u16 bcnq_aifs; + u16 bcnq_cwmin; + u16 bcnq_cwmax; + u32 bcnq_burst_time; + u32 bcnq_ready_time; + u32 rx_rxs_flags; + u32 rx_ampdu_reference; + u16 rx_enc_flags; + u8 rx_rs_status; + u8 rx_rs_flags; + u8 rx_phyerr; + s8 rx_rssi; + s8 rx_signal; + u8 rx_keyix; + u8 rx_rate; + u8 rx_rate_idx; + u8 rx_antenna; + u8 rx_bw; + u8 rx_nss; + u8 rx_chains; + u8 rx_more; + u8 rx_isaggr; + u8 rx_firstaggr; + u8 rx_moreaggr; + u8 rx_num_delims; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + u64 handoff_ns; + u64 hwtstamp_ns; + u64 tsf64; + u32 tstamp; + u32 duration; + u64 ext; + u64 wraps; + s64 tsf_offset; + u64 rebase_cnt; + u32 epoch; + u64 sample_cycle; + u64 sample_ns; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 adj_seq; + + e = &sc->ptp_handoff[i & ring_mask]; + e_seq = READ_ONCE(e->seq); + if (e_seq != i) + continue; + + dir = READ_ONCE(e->dir); + ptp_msgtype = READ_ONCE(e->ptp_msgtype); + epoch_valid = READ_ONCE(e->epoch_valid); + 
hwts_valid = READ_ONCE(e->hwts_valid); + ptp_seqid = READ_ONCE(e->ptp_seqid); + frame_control = READ_ONCE(e->frame_control); + seq_ctrl = READ_ONCE(e->seq_ctrl); + wlan_seq = READ_ONCE(e->wlan_seq); + frag = READ_ONCE(e->frag); + qos_control = READ_ONCE(e->qos_control); + retry = READ_ONCE(e->retry); + qos_valid = READ_ONCE(e->qos_valid); + tid = READ_ONCE(e->tid); + skb_priority = READ_ONCE(e->skb_priority); + skb_queue = READ_ONCE(e->skb_queue); + tx_hw_queue = READ_ONCE(e->tx_hw_queue); + txq_qnum = READ_ONCE(e->txq_qnum); + txq_mac80211_qnum = READ_ONCE(e->txq_mac80211_qnum); + tx_qid = READ_ONCE(e->tx_qid); + tx_tid = READ_ONCE(e->tx_tid); + tx_rateindex = READ_ONCE(e->tx_rateindex); + tx_shortretry = READ_ONCE(e->tx_shortretry); + tx_longretry = READ_ONCE(e->tx_longretry); + tx_rssi = READ_ONCE(e->tx_rssi); + tx_status = READ_ONCE(e->tx_status); + tx_flags = READ_ONCE(e->tx_flags); + tx_info_flags = READ_ONCE(e->tx_info_flags); + txq_depth = READ_ONCE(e->txq_depth); + txq_ampdu_depth = READ_ONCE(e->txq_ampdu_depth); + txq_pending_frames = READ_ONCE(e->txq_pending_frames); + txq_aifs = READ_ONCE(e->txq_aifs); + txq_cwmin = READ_ONCE(e->txq_cwmin); + txq_cwmax = READ_ONCE(e->txq_cwmax); + txq_burst_time = READ_ONCE(e->txq_burst_time); + txq_ready_time = READ_ONCE(e->txq_ready_time); + bcnq_qnum = READ_ONCE(e->bcnq_qnum); + bcnq_aifs = READ_ONCE(e->bcnq_aifs); + bcnq_cwmin = READ_ONCE(e->bcnq_cwmin); + bcnq_cwmax = READ_ONCE(e->bcnq_cwmax); + bcnq_burst_time = READ_ONCE(e->bcnq_burst_time); + bcnq_ready_time = READ_ONCE(e->bcnq_ready_time); + rx_rxs_flags = READ_ONCE(e->rx_rxs_flags); + rx_ampdu_reference = READ_ONCE(e->rx_ampdu_reference); + rx_enc_flags = READ_ONCE(e->rx_enc_flags); + rx_rs_status = READ_ONCE(e->rx_rs_status); + rx_rs_flags = READ_ONCE(e->rx_rs_flags); + rx_phyerr = READ_ONCE(e->rx_phyerr); + rx_rssi = READ_ONCE(e->rx_rssi); + rx_signal = READ_ONCE(e->rx_signal); + rx_keyix = READ_ONCE(e->rx_keyix); + rx_rate = READ_ONCE(e->rx_rate); + 
rx_rate_idx = READ_ONCE(e->rx_rate_idx); + rx_antenna = READ_ONCE(e->rx_antenna); + rx_bw = READ_ONCE(e->rx_bw); + rx_nss = READ_ONCE(e->rx_nss); + rx_chains = READ_ONCE(e->rx_chains); + rx_more = READ_ONCE(e->rx_more); + rx_isaggr = READ_ONCE(e->rx_isaggr); + rx_firstaggr = READ_ONCE(e->rx_firstaggr); + rx_moreaggr = READ_ONCE(e->rx_moreaggr); + rx_num_delims = READ_ONCE(e->rx_num_delims); + ether_addr_copy(addr1, e->addr1); + ether_addr_copy(addr2, e->addr2); + ether_addr_copy(addr3, e->addr3); + handoff_ns = READ_ONCE(e->handoff_ns); + hwtstamp_ns = READ_ONCE(e->hwtstamp_ns); + tsf64 = READ_ONCE(e->tsf64); + tstamp = READ_ONCE(e->tstamp); + duration = READ_ONCE(e->duration); + ext = READ_ONCE(e->ext); + wraps = READ_ONCE(e->wraps); + tsf_offset = READ_ONCE(e->tsf_offset); + rebase_cnt = READ_ONCE(e->rebase_cnt); + epoch = READ_ONCE(e->epoch); + sample_cycle = READ_ONCE(e->sample_cycle); + sample_ns = READ_ONCE(e->sample_ns); + tc_cycle_last = READ_ONCE(e->tc_cycle_last); + tc_nsec = READ_ONCE(e->tc_nsec); + tc_frac = READ_ONCE(e->tc_frac); + adj_seq = READ_ONCE(e->adj_seq); + + len += scnprintf(buf + len, buf_sz - len, + "%u %s handoff_ns=%llu ptp_msg=%u ptp_seq=%u " + "fc=0x%04x seq_ctrl=0x%04x wlan_seq=%u frag=%u " + "retry=%u qos=%u qos_ctl=0x%04x tid=%u " + "skb_prio=%u skb_q=%u tx_hwq=%u txq_q=%u " + "txq_macq=%d tx_qid=%u tx_tid=%u " + "tx_status=0x%04x tx_flags=0x%04x " + "tx_info_flags=0x%08x tx_rateidx=%u " + "tx_shortretry=%u tx_longretry=%u tx_rssi=%d " + "txq_depth=%u txq_ampdu_depth=%u " + "txq_pending=%u txq_aifs=%u txq_cwmin=%u " + "txq_cwmax=%u txq_burst=%u txq_ready=%u " + "bcnq_q=%u bcnq_aifs=%u bcnq_cwmin=%u " + "bcnq_cwmax=%u bcnq_burst=%u bcnq_ready=%u " + "rx_rxs_flags=0x%08x rx_ampdu_ref=%u " + "rx_enc_flags=0x%04x rx_rs_status=0x%02x " + "rx_rs_flags=0x%02x rx_phyerr=%u " + "rx_rate=0x%02x rx_rateidx=%u rx_bw=%u " + "rx_antenna=%u rx_rssi=%d rx_signal=%d " + "rx_keyix=%u rx_nss=%u rx_chains=%u " + "rx_more=%u rx_isaggr=%u rx_firstaggr=%u 
" + "rx_moreaggr=%u rx_num_delims=%u " + "addr1=%pM addr2=%pM addr3=%pM " + "hwts_valid=%u hwtstamp_ns=%llu tsf64=%llu " + "tstamp=0x%08x duration=%u ext=%llu wraps=%llu " + "tsf_offset=%lld rebase_cnt=%llu epoch=%u " + "epoch_valid=%u sample_cycle=%llu sample_ns=%llu " + "tc_cycle_last=%llu tc_nsec=%llu tc_frac=%llu " + "adj_seq=%llu\n", + e_seq, + ath9k_ptp_handoff_dir_name(dir), + (unsigned long long)handoff_ns, + ptp_msgtype, + ptp_seqid, + frame_control, + seq_ctrl, + wlan_seq, + frag, + retry, + qos_valid, + qos_control, + tid, + skb_priority, + skb_queue, + tx_hw_queue, + txq_qnum, + txq_mac80211_qnum, + tx_qid, + tx_tid, + tx_status, + tx_flags, + tx_info_flags, + tx_rateindex, + tx_shortretry, + tx_longretry, + tx_rssi, + txq_depth, + txq_ampdu_depth, + txq_pending_frames, + txq_aifs, + txq_cwmin, + txq_cwmax, + txq_burst_time, + txq_ready_time, + bcnq_qnum, + bcnq_aifs, + bcnq_cwmin, + bcnq_cwmax, + bcnq_burst_time, + bcnq_ready_time, + rx_rxs_flags, + rx_ampdu_reference, + rx_enc_flags, + rx_rs_status, + rx_rs_flags, + rx_phyerr, + rx_rate, + rx_rate_idx, + rx_bw, + rx_antenna, + rx_rssi, + rx_signal, + rx_keyix, + rx_nss, + rx_chains, + rx_more, + rx_isaggr, + rx_firstaggr, + rx_moreaggr, + rx_num_delims, + addr1, + addr2, + addr3, + hwts_valid, + (unsigned long long)hwtstamp_ns, + (unsigned long long)tsf64, + tstamp, + duration, + (unsigned long long)ext, + (unsigned long long)wraps, + (long long)tsf_offset, + (unsigned long long)rebase_cnt, + epoch, + epoch_valid, + (unsigned long long)sample_cycle, + (unsigned long long)sample_ns, + (unsigned long long)tc_cycle_last, + (unsigned long long)tc_nsec, + (unsigned long long)tc_frac, + (unsigned long long)adj_seq); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + return ret; +} + +static const struct file_operations fops_ptp_handoff = { + .read = read_file_ptp_handoff, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t 
write_file_ptp_handoff_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + + atomic_set(&sc->ptp_handoff_seq, 0); + if (sc->ptp_handoff && sc->ptp_handoff_len) + memset(sc->ptp_handoff, 0, + sc->ptp_handoff_len * sizeof(*sc->ptp_handoff)); + + return count; +} + +static const struct file_operations fops_ptp_handoff_reset = { + .write = write_file_ptp_handoff_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose the last cyclecounter glitch record via debugfs. */ +static ssize_t read_file_ptp_cc_glitch(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[512]; + int len; + u8 valid = READ_ONCE(sc->ptp_cc_glitch_valid); + u64 cnt = READ_ONCE(sc->ptp_cc_glitch_cnt); + u64 ts_ns = READ_ONCE(sc->ptp_cc_glitch_ts_ns); + u64 last = READ_ONCE(sc->ptp_cc_glitch_last); + u64 new = READ_ONCE(sc->ptp_cc_glitch_new); + s64 delta = READ_ONCE(sc->ptp_cc_glitch_delta); + u64 raw = READ_ONCE(sc->ptp_cc_glitch_raw); + s64 offset = READ_ONCE(sc->ptp_cc_glitch_offset); + u32 hi1 = READ_ONCE(sc->ptp_cc_glitch_hi1); + u32 lo = READ_ONCE(sc->ptp_cc_glitch_lo); + u32 hi2 = READ_ONCE(sc->ptp_cc_glitch_hi2); + u32 pid = READ_ONCE(sc->ptp_cc_glitch_pid); + u64 last_cc = READ_ONCE(sc->ptp_cc_last); + u64 last_raw = READ_ONCE(sc->ptp_cc_last_raw); + u32 last_hi1 = READ_ONCE(sc->ptp_cc_last_hi1); + u32 last_lo = READ_ONCE(sc->ptp_cc_last_lo); + u32 last_hi2 = READ_ONCE(sc->ptp_cc_last_hi2); + char comm[TASK_COMM_LEN]; + + memcpy(comm, sc->ptp_cc_glitch_comm, sizeof(comm)); + comm[sizeof(comm) - 1] = '\0'; + + len = scnprintf(buf, sizeof(buf), + "valid=%u cnt=%llu ts_ns=%llu last=%llu new=%llu " + "delta=%lld raw_tsf=%llu raw_hi1=0x%08x " + "raw_lo=0x%08x raw_hi2=0x%08x tsf_offset=%lld " + "pid=%u comm=%s\n" + "last_cc=%llu last_raw=%llu last_hi1=0x%08x " + "last_lo=0x%08x last_hi2=0x%08x\n", 
+ valid, + (unsigned long long)cnt, + (unsigned long long)ts_ns, + (unsigned long long)last, + (unsigned long long)new, + (long long)delta, + (unsigned long long)raw, + hi1, lo, hi2, + (long long)offset, + pid, + comm, + (unsigned long long)last_cc, + (unsigned long long)last_raw, + last_hi1, last_lo, last_hi2); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_cc_glitch = { + .read = read_file_ptp_cc_glitch, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_ptp_tc_anomaly(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1024]; + int len; + u8 valid = READ_ONCE(sc->ptp_tc_anomaly_valid); + u8 source = READ_ONCE(sc->ptp_tc_anomaly_source); + u8 reason = READ_ONCE(sc->ptp_tc_anomaly_reason); + + len = scnprintf(buf, sizeof(buf), + "valid=%u cnt=%llu ts_ns=%llu source=%u source_name=%s " + "reason=%u reason_name=%s adj_seq=%llu\n" + "host_now_ns=%llu host_last_ns=%llu host_delta_ns=%lld " + "phc_now_ns=%llu phc_last_ns=%llu phc_delta_ns=%lld\n" + "cycle_now=%llu cycle_last=%llu cycle_delta=%lld " + "raw_now=%llu raw_last=%llu raw_delta=%lld\n" + "offset_ns=%lld mismatch_ns=%lld ns_offset=%llu " + "cc_mult=%llu cc_shift=%u cc_mask=%#llx thresh_ns=%llu\n", + valid, + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_cnt), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_ts_ns), + source, + ath9k_ptp_tc_anomaly_source_name(source), + reason, + ath9k_ptp_tc_trace_reason_name(reason), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_adj_seq), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_host_ns), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_host_last_ns), + (long long)READ_ONCE(sc->ptp_tc_anomaly_host_delta_ns), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_ns), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_ns_last), + (long 
long)READ_ONCE(sc->ptp_tc_anomaly_ns_delta_ns), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_cycle_now), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_cycle_last), + (long long)READ_ONCE(sc->ptp_tc_anomaly_cycle_delta), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_raw_now), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_raw_last), + (long long)READ_ONCE(sc->ptp_tc_anomaly_raw_delta), + (long long)READ_ONCE(sc->ptp_tc_anomaly_offset_ns), + (long long)READ_ONCE(sc->ptp_tc_anomaly_mismatch_ns), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_ns_offset), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_cc_mult), + READ_ONCE(sc->ptp_tc_anomaly_cc_shift), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_cc_mask), + (unsigned long long)READ_ONCE(sc->ptp_tc_anomaly_thresh_ns)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tc_anomaly = { + .read = read_file_ptp_tc_anomaly, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_ptp_hwt_anomaly(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1024]; + int len; + u8 valid = READ_ONCE(sc->ptp_hwt_anomaly_valid); + u8 reason = READ_ONCE(sc->ptp_hwt_anomaly_reason); + + len = scnprintf(buf, sizeof(buf), + "valid=%u cnt=%llu ts_ns=%llu reason=%u reason_name=%s " + "adj_seq=%llu rebase_cnt=%llu epoch=%u\n" + "prev_ns=%llu ns=%llu prev_tc_nsec=%llu tc_nsec=%llu " + "cycle=%llu cycle_adj=%llu tc_cycle_last=%llu\n" + "tsf64=%llu offset_ns=%lld offset_rem_ns=%lld\n", + valid, + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_cnt), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_ts_ns), + reason, + ath9k_ptp_tc_trace_reason_name(reason), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_adj_seq), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_rebase_cnt), + 
READ_ONCE(sc->ptp_hwt_anomaly_epoch), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_prev_ns), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_ns), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_prev_tc_nsec), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_tc_nsec), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_cycle), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_cycle_adj), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_tc_cycle_last), + (unsigned long long)READ_ONCE(sc->ptp_hwt_anomaly_tsf64), + (long long)READ_ONCE(sc->ptp_hwt_anomaly_offset_ns), + (long long)READ_ONCE(sc->ptp_hwt_anomaly_offset_rem_ns)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } -static const struct file_operations fops_dirtyts = { - .read = read_file_dirtyts, - .write = write_file_dirtyts, +static const struct file_operations fops_ptp_hwt_anomaly = { + .read = read_file_ptp_hwt_anomaly, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_ptp_tc_mutation(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[1024]; + int len; + u8 valid = READ_ONCE(sc->ptp_tc_mutation_valid); + u8 source = READ_ONCE(sc->ptp_tc_mutation_source); + + len = scnprintf(buf, sizeof(buf), + "valid=%u cnt=%llu ts_ns=%llu source=%u source_name=%s " + "adj_seq=%llu target_ns=%llu delta_ns=%lld shift_arg=%u " + "cc_mult=%llu cc_shift=%u\n" + "pre_tc_nsec=%llu post_tc_nsec=%llu " + "pre_cycle_last=%llu post_cycle_last=%llu " + "pre_frac=%llu post_frac=%llu\n", + valid, + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_cnt), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_ts_ns), + source, + ath9k_ptp_tc_mutation_source_name(source), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_adj_seq), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_target_ns), + (long long)READ_ONCE(sc->ptp_tc_mutation_delta_ns), + 
READ_ONCE(sc->ptp_tc_mutation_shift_arg), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_cc_mult), + READ_ONCE(sc->ptp_tc_mutation_cc_shift), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_pre_tc_nsec), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_post_tc_nsec), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_pre_cycle_last), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_post_cycle_last), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_pre_frac), + (unsigned long long)READ_ONCE(sc->ptp_tc_mutation_post_frac)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_tc_mutation = { + .read = read_file_ptp_tc_mutation, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Clear the PTP event log buffer via debugfs. */ +static ssize_t write_file_ptp_evtlog_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + + WRITE_ONCE(sc->ptp_evtlog_enable, 0); + smp_mb(); + atomic_set(&sc->ptp_evtlog_seq, 0); + if (sc->ptp_evtlog && sc->ptp_evtlog_len) + memset(sc->ptp_evtlog, 0, + sc->ptp_evtlog_len * sizeof(*sc->ptp_evtlog)); + WRITE_ONCE(sc->ptp_rx_parse_attempt_cnt, 0); + WRITE_ONCE(sc->ptp_rx_parse_miss_cnt, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_reason, ATH9K_PTP_PARSE_OK); + WRITE_ONCE(sc->ptp_rx_parse_last_msgtype, 0xFF); + WRITE_ONCE(sc->ptp_rx_parse_last_seqid, 0xFFFF); + WRITE_ONCE(sc->ptp_rx_parse_last_skb_len, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_hdrlen, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_payload_len, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_snap_off, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ptp_off, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ptp_len, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ethertype, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_sport, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_dport, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_fc, 0); + 
WRITE_ONCE(sc->ptp_rx_parse_last_seq_ctrl, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ip_version, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ip_proto, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_ihl, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_bytes_len, 0); + memset(sc->ptp_rx_parse_last_bytes, 0, + sizeof(sc->ptp_rx_parse_last_bytes)); + + return count; +} + +static const struct file_operations fops_ptp_evtlog_reset = { + .write = write_file_ptp_evtlog_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose the last captured stack trace via debugfs. */ +static ssize_t read_file_ptp_stack(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + const size_t buf_sz = 8192; + char *buf; + size_t len = 0; + unsigned long flags; + unsigned long entries[ATH9K_PTP_STACK_DEPTH]; + u8 enable; + u8 event; + u16 depth; + s64 arg; + u64 ts_ns; + u32 pid; + char comm[TASK_COMM_LEN]; + int i; + int ret; + + buf = kvmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_irqsave(&sc->systim_lock, flags); + enable = sc->ptp_stack_enable; + event = sc->ptp_last_stack_event; + depth = sc->ptp_last_stack_len; + arg = sc->ptp_last_stack_arg; + ts_ns = sc->ptp_last_stack_ns; + pid = sc->ptp_last_stack_pid; + memcpy(comm, sc->ptp_last_stack_comm, sizeof(comm)); + if (depth) + memcpy(entries, sc->ptp_last_stack_entries, + depth * sizeof(entries[0])); + spin_unlock_irqrestore(&sc->systim_lock, flags); + comm[sizeof(comm) - 1] = '\0'; + + len += scnprintf(buf + len, buf_sz - len, + "enable=%u event=%s arg=%lld ts_ns=%llu pid=%u " + "comm=%s depth=%u\n", + enable, + ath9k_ptp_evtlog_name(event), + (long long)arg, + (unsigned long long)ts_ns, + pid, + comm, + depth); + + for (i = 0; i < depth && len < buf_sz - 64; i++) + len += scnprintf(buf + len, buf_sz - len, + "%02d %pS\n", i, (void *)entries[i]); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kvfree(buf); + 
return ret; +} + +static const struct file_operations fops_ptp_stack = { + .read = read_file_ptp_stack, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +/* Expose the last RX/TX sample details via debugfs. */ +static ssize_t read_file_ptp_sample(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[512]; + int len; + struct ath9k_ptp_sample rx; + struct ath9k_ptp_sample tx; + s64 rx_hi_delta; + s64 tx_hi_delta; + u32 rx_lo_ok; + u32 tx_lo_ok; + + rx.tsf64 = READ_ONCE(sc->ptp_sample_rx.tsf64); + rx.tstamp = READ_ONCE(sc->ptp_sample_rx.tstamp); + rx.duration = READ_ONCE(sc->ptp_sample_rx.duration); + rx.read_dur_ns = READ_ONCE(sc->ptp_sample_rx.read_dur_ns); + rx.ext = READ_ONCE(sc->ptp_sample_rx.ext); + rx.epoch = READ_ONCE(sc->ptp_sample_rx.epoch); + rx.epoch_valid = READ_ONCE(sc->ptp_sample_rx.epoch_valid); + rx.cycle = READ_ONCE(sc->ptp_sample_rx.cycle); + rx.ns = READ_ONCE(sc->ptp_sample_rx.ns); + rx.tc_cycle_last = READ_ONCE(sc->ptp_sample_rx.tc_cycle_last); + rx.tc_nsec = READ_ONCE(sc->ptp_sample_rx.tc_nsec); + rx.tc_frac = READ_ONCE(sc->ptp_sample_rx.tc_frac); + rx.adj_seq = READ_ONCE(sc->ptp_sample_rx.adj_seq); + + tx.tsf64 = READ_ONCE(sc->ptp_sample_tx.tsf64); + tx.tstamp = READ_ONCE(sc->ptp_sample_tx.tstamp); + tx.duration = READ_ONCE(sc->ptp_sample_tx.duration); + tx.read_dur_ns = READ_ONCE(sc->ptp_sample_tx.read_dur_ns); + tx.ext = READ_ONCE(sc->ptp_sample_tx.ext); + tx.epoch = READ_ONCE(sc->ptp_sample_tx.epoch); + tx.epoch_valid = READ_ONCE(sc->ptp_sample_tx.epoch_valid); + tx.cycle = READ_ONCE(sc->ptp_sample_tx.cycle); + tx.ns = READ_ONCE(sc->ptp_sample_tx.ns); + tx.tc_cycle_last = READ_ONCE(sc->ptp_sample_tx.tc_cycle_last); + tx.tc_nsec = READ_ONCE(sc->ptp_sample_tx.tc_nsec); + tx.tc_frac = READ_ONCE(sc->ptp_sample_tx.tc_frac); + tx.adj_seq = READ_ONCE(sc->ptp_sample_tx.adj_seq); + + rx_hi_delta = (s64)((rx.ext >> 32) - (rx.tsf64 >> 32)); + 
tx_hi_delta = (s64)((tx.ext >> 32) - (tx.tsf64 >> 32)); + rx_lo_ok = ((u32)rx.ext == rx.tstamp); + tx_lo_ok = ((u32)tx.ext == tx.tstamp); + + len = scnprintf(buf, sizeof(buf), + "rx tsf64=%llu tstamp=%u read_dur_ns=%u ext=%llu ext_hi_delta=%lld ext_lo_ok=%u epoch=%u epoch_valid=%u cycle=%llu ns=%llu " + "tc_cycle_last=%llu tc_nsec=%llu tc_frac=%llu adj_seq=%llu\n", + (unsigned long long)rx.tsf64, + rx.tstamp, + rx.read_dur_ns, + (unsigned long long)rx.ext, + (long long)rx_hi_delta, + rx_lo_ok, + rx.epoch, + rx.epoch_valid, + (unsigned long long)rx.cycle, + (unsigned long long)rx.ns, + (unsigned long long)rx.tc_cycle_last, + (unsigned long long)rx.tc_nsec, + (unsigned long long)rx.tc_frac, + (unsigned long long)rx.adj_seq); + + len += scnprintf(buf + len, sizeof(buf) - len, + "tx tsf64=%llu tstamp=%u duration=%u read_dur_ns=%u ext=%llu ext_hi_delta=%lld ext_lo_ok=%u epoch=%u epoch_valid=%u cycle=%llu ns=%llu " + "tc_cycle_last=%llu tc_nsec=%llu tc_frac=%llu adj_seq=%llu\n", + (unsigned long long)tx.tsf64, + tx.tstamp, + tx.duration, + tx.read_dur_ns, + (unsigned long long)tx.ext, + (long long)tx_hi_delta, + tx_lo_ok, + tx.epoch, + tx.epoch_valid, + (unsigned long long)tx.cycle, + (unsigned long long)tx.ns, + (unsigned long long)tx.tc_cycle_last, + (unsigned long long)tx.tc_nsec, + (unsigned long long)tx.tc_frac, + (unsigned long long)tx.adj_seq); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ptp_sample = { + .read = read_file_ptp_sample, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -188,12 +2521,8 @@ static ssize_t write_file_trigger_cbr(struct file *file, const char __user *user struct ath_hw *ah = sc->sc_ah; if (count & 1) { - // printk("ath9k: cbr open\n"); // TODO: only debug -// REG_RMW(ah, AR_QMISC(ATH_TXQ_AC_VI), AR_Q_MISC_FSP_CBR, AR_Q_MISC_FSP); REG_WRITE(ah, AR_QCBRCFG(ATH_TXQ_AC_VI), 0xc350); } else { - // printk("ath9k: cbr gated\n"); // TODO: only debug -// 
REG_RMW(ah, AR_QMISC(ATH_TXQ_AC_VI), 0x4, AR_Q_MISC_FSP); } return count; @@ -206,7 +2535,6 @@ static const struct file_operations fops_trigger_cbr = { .owner = THIS_MODULE, .llseek = default_llseek, }; -// focus: wiptp end #define DMA_BUF_LEN 1024 @@ -1516,13 +3844,276 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, read_file_dump_nfcal); - // focus: wiptp start - debugfs_create_file("dirtyts", 0600, sc->debug.debugfs_phy, - sc, &fops_dirtyts); + + debugfs_create_file("ptp_mon", 0600, sc->debug.debugfs_phy, + sc, &fops_ptp_mon); + + debugfs_create_file("ptp_tc", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tc); + + debugfs_create_file("ptp_tc_read", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tc_read); + + debugfs_create_file("ptp_cc_shift", 0600, sc->debug.debugfs_phy, + sc, &fops_ptp_cc_shift); + + debugfs_create_file("ptp_settsf", 0200, sc->debug.debugfs_phy, + sc, &fops_ptp_settsf); + + debugfs_create_file("ptp_adj", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_adj); + + debugfs_create_u32("ptp_wrap_glitch_thresh", 0600, + sc->debug.debugfs_phy, + &sc->ptp_wrap_glitch_thresh); + debugfs_create_u32("ptp_rebase_drop_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_rebase_drop_ns); + debugfs_create_u32("ptp_rebase_quarantine_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_rebase_drop_ns); + debugfs_create_u64("ptp_rebase_drop_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_rebase_drop_cnt); + debugfs_create_u64("ptp_rebase_rx_reject_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_rebase_rx_reject_cnt); + debugfs_create_u64("ptp_rebase_tx_reject_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_rebase_tx_reject_cnt); + debugfs_create_u64("ptp_rebase_fallback_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_rebase_fallback_cnt); + debugfs_create_u32("ptp_guard_timeout_ms", 0600, + sc->debug.debugfs_phy, + &sc->ptp_guard_timeout_ms); + debugfs_create_u32("ptp_guard_after_event_ms", 0600, + sc->debug.debugfs_phy, + &sc->ptp_guard_after_event_ms); + 
debugfs_create_u8("ptp_guard_inflight", 0400, + sc->debug.debugfs_phy, + &sc->ptp_guard_inflight); + debugfs_create_u16("ptp_guard_seqid", 0400, + sc->debug.debugfs_phy, + &sc->ptp_guard_seqid); + debugfs_create_u64("ptp_guard_block_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_guard_block_cnt); + debugfs_create_u64("ptp_guard_timeout_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_guard_timeout_cnt); + debugfs_create_u64("ptp_guard_block_last_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_guard_block_last_ns); + debugfs_create_u64("ptp_last_event_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_last_event_ns); + debugfs_create_u64("ptp_last_event_rx_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_last_event_rx_ns); + debugfs_create_u64("ptp_last_event_tx_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_last_event_tx_ns); + debugfs_create_u32("ptp_sync_dup_window_us", 0600, + sc->debug.debugfs_phy, + &sc->ptp_sync_dup_window_us); + debugfs_create_u32("ptp_sync_dup_last_delta_us", 0400, + sc->debug.debugfs_phy, + &sc->ptp_sync_dup_last_delta_us); + debugfs_create_u64("ptp_sync_dup_drop_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_sync_dup_drop_cnt); + debugfs_create_u32("ptp_sta_hidden_step_thresh_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_sta_hidden_step_thresh_ns); + debugfs_create_u64("ptp_sta_hidden_step_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_sta_hidden_step_cnt); + debugfs_create_u64("ptp_sta_hidden_step_last_abs_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_sta_hidden_step_last_abs_ns); + debugfs_create_u64("ptp_sta_hidden_step_last_reanchor_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_sta_hidden_step_last_reanchor_ns); + debugfs_create_file("ptp_sta_hidden_step", 0400, + sc->debug.debugfs_phy, + sc, &fops_ptp_sta_hidden_step); + debugfs_create_u32("ptp_infra_reanchor_guard_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_infra_reanchor_guard_ns); + debugfs_create_u8("ptp_infra_reanchor_enable", 0600, + sc->debug.debugfs_phy, + 
&sc->ptp_infra_reanchor_enable); + debugfs_create_u64("ptp_infra_reanchor_reject_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_infra_reanchor_reject_cnt); + debugfs_create_u64("ptp_infra_reanchor_last_delta_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_infra_reanchor_last_delta_ns); + debugfs_create_u64("ptp_settsf_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_cnt); + debugfs_create_u64("ptp_settsf_last_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_last_ns); + debugfs_create_u64("ptp_settsf_last_delta_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_last_delta_ns); + debugfs_create_u32("ptp_settsf_delta_thresh_us", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_delta_thresh_us); + debugfs_create_u64("ptp_settsf_delta_skip_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_delta_skip_cnt); + debugfs_create_u8("ptp_settsf_dur_filter_enable", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_filter_enable); + debugfs_create_u32("ptp_settsf_dur_pct_over_avg", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_pct_over_avg); + debugfs_create_u8("ptp_settsf_dur_hist_max", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_hist_max); + debugfs_create_u32("ptp_settsf_write_pct_over_avg", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_write_pct_over_avg); + debugfs_create_u8("ptp_settsf_write_hist_max", 0600, + sc->debug.debugfs_phy, + &sc->ptp_settsf_write_hist_max); + debugfs_create_file("ptp_settsf_dur_drop_cnt", 0400, + sc->debug.debugfs_phy, sc, + &fops_ptp_settsf_dur_drop); + debugfs_create_u32("ptp_settsf_dur_last_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_last_ns); + debugfs_create_u32("ptp_settsf_dur_last_avg_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_last_avg_ns); + debugfs_create_u32("ptp_settsf_dur_last_med_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_dur_last_med_ns); + debugfs_create_u32("ptp_settsf_dur_last_thresh_ns", 0400, + sc->debug.debugfs_phy, + 
&sc->ptp_settsf_dur_last_thresh_ns); + debugfs_create_u32("ptp_settsf_write_last_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_write_last_ns); + debugfs_create_u32("ptp_settsf_write_last_avg_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_write_last_avg_ns); + debugfs_create_u32("ptp_settsf_write_last_cap_ns", 0400, + sc->debug.debugfs_phy, + &sc->ptp_settsf_write_last_cap_ns); + debugfs_create_file("ptp_settsf_last_jump_ns", 0400, + sc->debug.debugfs_phy, + sc, &fops_ptp_settsf_last_jump); + debugfs_create_u8("ptp_stall_enable", 0600, + sc->debug.debugfs_phy, + &sc->ptp_stall_enable); + debugfs_create_u32("ptp_stall_thresh_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_stall_thresh_ns); + debugfs_create_u64("ptp_stall_rx_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_stall_rx_cnt); + debugfs_create_u64("ptp_stall_tx_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_stall_tx_cnt); + debugfs_create_u8("ptp_tx_use_duration", 0600, + sc->debug.debugfs_phy, + &sc->ptp_tx_use_duration); + debugfs_create_u8("ptp_rx_use_duration", 0600, + sc->debug.debugfs_phy, + &sc->ptp_rx_use_duration); + debugfs_create_u8("ptp_noack_ptp_event_enable", 0600, + sc->debug.debugfs_phy, + &sc->ptp_noack_ptp_event_enable); + debugfs_create_u64("ptp_tx_noack_event_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_tx_noack_event_cnt); + debugfs_create_u64("ptp_tx_suspect_status_cnt", 0400, + sc->debug.debugfs_phy, + &sc->ptp_tx_suspect_status_cnt); + + debugfs_create_file("ptp_rx_ts", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_rx_ts); + debugfs_create_file("ptp_rx_parse", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_rx_parse); + debugfs_create_file("ptp_tx_ts", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tx_ts); + debugfs_create_file("ptp_hwts_miss", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_hwts_miss); + debugfs_create_file("ptp_rx_ptp_types", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_rx_types); + debugfs_create_file("ptp_tx_ptp_types", 0400, sc->debug.debugfs_phy, + sc, 
&fops_ptp_tx_types); + + debugfs_create_file("ptp_wrap", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_wrap); + + debugfs_create_u8("ptp_evtlog_enable", 0600, sc->debug.debugfs_phy, + &sc->ptp_evtlog_enable); + debugfs_create_u8("ptp_evtlog_ptp_only", 0600, sc->debug.debugfs_phy, + &sc->ptp_evtlog_ptp_only); + debugfs_create_u8("ptp_evtlog_phc", 0600, sc->debug.debugfs_phy, + &sc->ptp_evtlog_phc); + debugfs_create_u32("ptp_evtlog_len", 0400, sc->debug.debugfs_phy, + &sc->ptp_evtlog_len); + debugfs_create_u8("ptp_settsf_enable", 0600, sc->debug.debugfs_phy, + &sc->ptp_settsf_enable); + debugfs_create_u8("ptp_sta_hw_reset_restore_enable", 0600, + sc->debug.debugfs_phy, + &sc->sc_ah->ptp_sta_hw_reset_restore_enable); + debugfs_create_u8("ptp_hw_reset_restore_rebase_enable", 0600, + sc->debug.debugfs_phy, + &sc->sc_ah->ptp_hw_reset_restore_rebase_enable); + debugfs_create_file("ptp_tsf_access", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tsf_access); + + debugfs_create_file("ptp_evtlog", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_evtlog); + + debugfs_create_file("ptp_evtlog_reset", 0200, sc->debug.debugfs_phy, + sc, &fops_ptp_evtlog_reset); + + debugfs_create_u8("ptp_tc_trace_enable", 0600, + sc->debug.debugfs_phy, + &sc->ptp_tc_trace_enable); + debugfs_create_u32("ptp_tc_trace_len", 0400, sc->debug.debugfs_phy, + &sc->ptp_tc_trace_len); + debugfs_create_file("ptp_tc_trace", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tc_trace); + debugfs_create_file("ptp_tc_trace_reset", 0200, + sc->debug.debugfs_phy, + sc, &fops_ptp_tc_trace_reset); + debugfs_create_file("ptp_handoff", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_handoff); + debugfs_create_file("ptp_handoff_reset", 0200, + sc->debug.debugfs_phy, + sc, &fops_ptp_handoff_reset); + + debugfs_create_file("ptp_cc_glitch", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_cc_glitch); + debugfs_create_u64("ptp_tc_anomaly_thresh_ns", 0600, + sc->debug.debugfs_phy, + &sc->ptp_tc_anomaly_thresh_ns); + 
debugfs_create_file("ptp_tc_anomaly", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tc_anomaly); + debugfs_create_file("ptp_hwt_anomaly", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_hwt_anomaly); + debugfs_create_file("ptp_tc_mutation", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_tc_mutation); + + debugfs_create_u8("ptp_stack_enable", 0600, sc->debug.debugfs_phy, + &sc->ptp_stack_enable); + debugfs_create_file("ptp_stack", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_stack); + + debugfs_create_file("ptp_sample", 0400, sc->debug.debugfs_phy, + sc, &fops_ptp_sample); debugfs_create_file("trigger_cbr", 0600, sc->debug.debugfs_phy, sc, &fops_trigger_cbr); - // focus: wiptp end ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah); ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 0281da1ad4c0..0eda61ae8fb7 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,92 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); +static void ath9k_ptp_settsf_hist_add(struct ath_softc *sc, u32 dur_ns) +{ + u8 idx = sc->ptp_settsf_dur_hist_idx; + u8 max_idx = sc->ptp_settsf_dur_hist_max; + u8 max_samples; + + if (max_idx >= ATH9K_PTP_SETTSF_DUR_HIST_LEN) + max_idx = ATH9K_PTP_SETTSF_DUR_HIST_LEN - 1; + max_samples = max_idx + 1; + + sc->ptp_settsf_dur_hist[idx] = dur_ns; + idx++; + if (idx >= max_samples) + idx = 0; + sc->ptp_settsf_dur_hist_idx = idx; + if (sc->ptp_settsf_dur_hist_cnt > max_samples) + sc->ptp_settsf_dur_hist_cnt = max_samples; + if (sc->ptp_settsf_dur_hist_cnt < max_samples) + sc->ptp_settsf_dur_hist_cnt++; +} + +static u32 ath9k_ptp_settsf_hist_avg(const struct ath_softc *sc) +{ + u64 sum = 0; + u8 i; + u8 max_idx = sc->ptp_settsf_dur_hist_max; + u8 max_samples; + u8 cnt = sc->ptp_settsf_dur_hist_cnt; + + if 
(max_idx >= ATH9K_PTP_SETTSF_DUR_HIST_LEN) + max_idx = ATH9K_PTP_SETTSF_DUR_HIST_LEN - 1; + max_samples = max_idx + 1; + if (cnt > max_samples) + cnt = max_samples; + if (!cnt) + return 0; + + for (i = 0; i < cnt; i++) + sum += sc->ptp_settsf_dur_hist[i]; + + return (u32)(sum / cnt); +} + +static void ath9k_ptp_settsf_write_hist_add(struct ath_softc *sc, u32 dur_ns) +{ + u8 idx = sc->ptp_settsf_write_hist_idx; + u8 max_idx = sc->ptp_settsf_write_hist_max; + u8 max_samples; + + if (max_idx >= ATH9K_PTP_SETTSF_DUR_HIST_LEN) + max_idx = ATH9K_PTP_SETTSF_DUR_HIST_LEN - 1; + max_samples = max_idx + 1; + + sc->ptp_settsf_write_hist[idx] = dur_ns; + idx++; + if (idx >= max_samples) + idx = 0; + sc->ptp_settsf_write_hist_idx = idx; + if (sc->ptp_settsf_write_hist_cnt > max_samples) + sc->ptp_settsf_write_hist_cnt = max_samples; + if (sc->ptp_settsf_write_hist_cnt < max_samples) + sc->ptp_settsf_write_hist_cnt++; +} + +static u32 ath9k_ptp_settsf_write_hist_avg(const struct ath_softc *sc) +{ + u64 sum = 0; + u8 i; + u8 max_idx = sc->ptp_settsf_write_hist_max; + u8 max_samples; + u8 cnt = sc->ptp_settsf_write_hist_cnt; + + if (max_idx >= ATH9K_PTP_SETTSF_DUR_HIST_LEN) + max_idx = ATH9K_PTP_SETTSF_DUR_HIST_LEN - 1; + max_samples = max_idx + 1; + if (cnt > max_samples) + cnt = max_samples; + if (!cnt) + return 0; + + for (i = 0; i < cnt; i++) + sum += sc->ptp_settsf_write_hist[i]; + + return (u32)(sum / cnt); +} + MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); @@ -1864,6 +1951,51 @@ u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur) } EXPORT_SYMBOL(ath9k_hw_get_tsf_offset); +u64 ath9k_hw_get_tsf_offset64(struct timespec64 *last, struct timespec64 *cur) +{ + struct timespec64 ts; + u64 usec; + + if (!cur) { + ktime_get_raw_ts64(&ts); + cur = &ts; + } + + usec = (u64)cur->tv_sec * 1000000ULL + (u64)cur->tv_nsec / 1000; + usec -= (u64)last->tv_sec * 
1000000ULL + (u64)last->tv_nsec / 1000; + + return usec; +} +EXPORT_SYMBOL(ath9k_hw_get_tsf_offset64); + +static void ath9k_hw_restore_tsf64(struct ath_hw *ah, u64 tsf64) +{ + u64 tsf_old = 0; + + if (ah->ptp_settsf_log) + tsf_old = ath9k_hw_gettsf64(ah); + + REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); + REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); + + if (ah->ptp_settsf_log) + ah->ptp_settsf_log(ah, tsf_old, tsf64); +} + +static void ath9k_hw_reset_restore_tsf64(struct ath_hw *ah, u64 tsf64) +{ + if (READ_ONCE(ah->ptp_hw_reset_restore_rebase_enable)) + ath9k_hw_settsf64(ah, tsf64); + else + ath9k_hw_restore_tsf64(ah, tsf64); +} + +static bool ath9k_hw_reset_should_restore_tsf(struct ath_hw *ah) +{ + return ah->opmode != NL80211_IFTYPE_STATION || + READ_ONCE(ah->ptp_sta_hw_reset_restore_enable); +} + int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, struct ath9k_hw_cal_data *caldata, bool fastcc) { @@ -1872,7 +2004,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u32 saveDefAntenna; u32 macStaId1; struct timespec64 tsf_ts; - u32 tsf_offset; + u64 tsf_offset; u64 tsf = 0; int r; bool start_mci_reset = false; @@ -1950,9 +2082,15 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, udelay(50); } - /* Restore TSF */ - tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); - ath9k_hw_settsf64(ah, tsf + tsf_offset); + /* + * Managed STA syncs TSF from AP beacons after reset. Replaying the + * pre-reset TSF here can poison the STA PTP time base before beacon + * sync has a chance to settle it. + */ + if (ath9k_hw_reset_should_restore_tsf(ah)) { + tsf_offset = ath9k_hw_get_tsf_offset64(&tsf_ts, NULL); + ath9k_hw_reset_restore_tsf64(ah, tsf + tsf_offset); + } if (AR_SREV_9280_20_OR_LATER(ah)) REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE); @@ -1974,9 +2112,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, * right after the chip reset. 
When that happens, write a new * value after the initvals have been applied. */ - if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { - tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); - ath9k_hw_settsf64(ah, tsf + tsf_offset); + if (AR_SREV_9100(ah) && ath9k_hw_reset_should_restore_tsf(ah) && + ath9k_hw_gettsf64(ah) < tsf) { + tsf_offset = ath9k_hw_get_tsf_offset64(&tsf_ts, NULL); + ath9k_hw_reset_restore_tsf64(ah, tsf + tsf_offset); } ath9k_hw_init_mfp(ah); @@ -3020,6 +3159,9 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah) u32 tsf_lower, tsf_upper1, tsf_upper2; int i; + if (ah) + atomic64_inc(&ah->ptp_tsf_get64_cnt); + tsf_upper1 = REG_READ(ah, AR_TSF_U32); for (i = 0; i < ATH9K_MAX_TSF_READ; i++) { tsf_lower = REG_READ(ah, AR_TSF_L32); @@ -3037,20 +3179,260 @@ EXPORT_SYMBOL(ath9k_hw_gettsf64); void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) { - // focus: wiptp - // REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); - // REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); + u64 tsf_old = 0; + u64 tsf_read = 0; + struct ath_softc *sc = NULL; + u32 dur_u32 = 0; + u32 write_u32 = 0; + u32 total_u32 = 0; + u32 read_avg_u32 = 0; + u32 avg_u32 = 0; + u32 pct_over_avg = 0; + u8 hist_max = 0; + u8 max_samples = 1; + bool window_full = false; + u32 write_avg_u32 = 0; + u32 write_cap_u32 = 0; + u32 write_pct_over_avg = 0; + u8 write_hist_max = 0; + u8 write_max_samples = 1; + bool write_window_full = false; + bool drop = false; + bool skip_settsf = false; + s64 delta_tsf = 0; + u32 delta_thresh = 0; + + u64 read_start_ns = 0; + u64 read_end_ns = 0; + u64 write_start_ns = 0; + u64 write_end_ns = 0; + u64 total_ns = 0; + s64 duration_ns = 0; + + if (ah && ah->hw) + sc = ah->hw->priv; + + if (ah) + atomic64_inc(&ah->ptp_tsf_set_cnt); + + if (ah) { + ah->ptp_settsf_read_start_ns = 0; + ah->ptp_settsf_read_end_ns = 0; + ah->ptp_settsf_write_start_ns = 0; + ah->ptp_settsf_write_end_ns = 0; + ah->ptp_settsf_read_ns = 0; + ah->ptp_settsf_write_ns = 0; + 
ah->ptp_settsf_total_ns = 0; + } + if (ah && (ah->ptp_wrap_rebase || ah->ptp_settsf_log || + (sc && READ_ONCE(sc->ptp_settsf_delta_thresh_us)))) { + read_start_ns = ktime_get_ns(); + tsf_old = ath9k_hw_gettsf64(ah); + read_end_ns = ktime_get_ns(); + duration_ns = (s64)(read_end_ns - read_start_ns); + } + if (sc && ah && ah->ptp_wrap_rebase) { + delta_thresh = READ_ONCE(sc->ptp_settsf_delta_thresh_us); + if (delta_thresh) { + delta_tsf = (s64)tsf64 - (s64)tsf_old; + /* Skip settsf+rebase for small forward steps. */ + if (delta_tsf >= 0 && + delta_tsf <= (s64)delta_thresh) { + skip_settsf = true; + } + } + } + if (sc) { + if (duration_ns <= 0) + dur_u32 = 0; + else if (duration_ns > (s64)U32_MAX) + dur_u32 = U32_MAX; + else + dur_u32 = (u32)duration_ns; + + sc->ptp_settsf_dur_last_ns = dur_u32; + if (ah) { + ah->ptp_settsf_read_start_ns = read_start_ns; + ah->ptp_settsf_read_end_ns = read_end_ns; + ah->ptp_settsf_read_ns = dur_u32; + } + + sc->ptp_settsf_dur_last_med_ns = 0; + sc->ptp_settsf_dur_last_thresh_ns = 0; + + if (skip_settsf) { + WRITE_ONCE(sc->ptp_settsf_delta_skip_cnt, + READ_ONCE(sc->ptp_settsf_delta_skip_cnt) + 1); + if (ah) { + WRITE_ONCE(ah->ptp_rebase_anchor_valid, false); + atomic64_set(&ah->ptp_settsf_dur_ns, 0); + } + return; + } + + pct_over_avg = sc->ptp_settsf_dur_pct_over_avg; + hist_max = sc->ptp_settsf_dur_hist_max; + if (hist_max >= ATH9K_PTP_SETTSF_DUR_HIST_LEN) + hist_max = ATH9K_PTP_SETTSF_DUR_HIST_LEN - 1; + max_samples = hist_max + 1; + window_full = sc->ptp_settsf_dur_hist_cnt >= max_samples; + + avg_u32 = ath9k_ptp_settsf_hist_avg(sc); + sc->ptp_settsf_dur_last_med_ns = avg_u32; + if (sc->ptp_settsf_dur_filter_enable && + window_full && avg_u32 && pct_over_avg) { + u64 thresh = div64_u64((u64)avg_u32 * + (100 + pct_over_avg), 100); + + if (thresh > (u64)U32_MAX) + thresh = U32_MAX; + sc->ptp_settsf_dur_last_thresh_ns = (u32)thresh; + if (dur_u32 > (u32)thresh) { + sc->ptp_settsf_dur_drop_cnt++; + drop = true; + } + } + if (drop) + 
return; + sc->ptp_settsf_dur_pass_cnt++; + ath9k_ptp_settsf_hist_add(sc, dur_u32); + sc->ptp_settsf_dur_last_avg_ns = + ath9k_ptp_settsf_hist_avg(sc); + read_avg_u32 = sc->ptp_settsf_dur_last_avg_ns; + } + if (ah && ah->ptp_wrap_rebase) { + s64 offset = atomic64_read(&ah->ptp_tsf_offset); + s64 rem = atomic64_read(&ah->ptp_tsf_offset_ns_rem); + u64 phc_prev = (u64)((s64)tsf_old + offset); + + WRITE_ONCE(ah->ptp_rebase_phc, phc_prev); + WRITE_ONCE(ah->ptp_rebase_phc_rem_ns, rem); + WRITE_ONCE(ah->ptp_rebase_anchor_valid, true); + } + + if (ah && (ah->ptp_wrap_rebase || ah->ptp_settsf_log)) + write_start_ns = ktime_get_ns(); + REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); + REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); + if (ah && (ah->ptp_wrap_rebase || ah->ptp_settsf_log)) { + write_end_ns = ktime_get_ns(); + if (write_end_ns >= write_start_ns) + write_u32 = (write_end_ns - write_start_ns > + (u64)U32_MAX) ? U32_MAX : + (u32)(write_end_ns - write_start_ns); + if (read_start_ns && write_end_ns >= read_start_ns) + total_ns = write_end_ns - read_start_ns; + if (total_ns > (u64)U32_MAX) + total_u32 = U32_MAX; + else + total_u32 = (u32)total_ns; + ah->ptp_settsf_write_start_ns = write_start_ns; + ah->ptp_settsf_write_end_ns = write_end_ns; + ah->ptp_settsf_write_ns = write_u32; + ah->ptp_settsf_total_ns = total_u32; + if (ah->ptp_wrap_rebase) { + u32 write_cap = write_u32; + u64 off_u64; + + if (sc) { + sc->ptp_settsf_write_last_ns = write_u32; + write_pct_over_avg = + sc->ptp_settsf_write_pct_over_avg; + write_hist_max = + sc->ptp_settsf_write_hist_max; + if (write_hist_max >= + ATH9K_PTP_SETTSF_DUR_HIST_LEN) + write_hist_max = + ATH9K_PTP_SETTSF_DUR_HIST_LEN - + 1; + write_max_samples = write_hist_max + 1; + write_window_full = + sc->ptp_settsf_write_hist_cnt >= + write_max_samples; + + write_avg_u32 = + ath9k_ptp_settsf_write_hist_avg(sc); + sc->ptp_settsf_write_last_avg_ns = + write_avg_u32; + sc->ptp_settsf_write_last_cap_ns = 0; + + if 
(sc->ptp_settsf_dur_filter_enable && + write_window_full && write_avg_u32 && + write_pct_over_avg) { + u64 cap = div64_u64( + (u64)write_avg_u32 * + (100 + write_pct_over_avg), + 100); + + if (cap > (u64)U32_MAX) + cap = U32_MAX; + sc->ptp_settsf_write_last_cap_ns = + (u32)cap; + if (write_cap > (u32)cap) + write_cap = (u32)cap; + } + + write_cap_u32 = write_cap; + if (write_window_full) + ath9k_ptp_settsf_write_hist_add(sc, + write_cap_u32); + else + ath9k_ptp_settsf_write_hist_add(sc, + write_u32); + } + + off_u64 = (u64)(read_avg_u32 ? read_avg_u32 : dur_u32) + + (u64)write_cap; + u32 off_u32 = (off_u64 > (u64)U32_MAX) ? U32_MAX : + (u32)off_u64; + atomic64_set(&ah->ptp_settsf_dur_ns, off_u32); + } + } + + if (ah && ah->ptp_settsf_log) + ah->ptp_settsf_log(ah, tsf_old, tsf64); + + if (ah && ah->ptp_wrap_rebase) + ah->ptp_wrap_rebase(ah, tsf64); } EXPORT_SYMBOL(ath9k_hw_settsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah) { + u64 tsf_old = 0; + u64 tsf_read = 0; + if (ah) + atomic64_inc(&ah->ptp_tsf_reset_cnt); + if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, AH_TSF_WRITE_TIMEOUT)) ath_dbg(ath9k_hw_common(ah), RESET, "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); + if (ah && (ah->ptp_wrap_rebase || ah->ptp_resettsf_log)) { + tsf_old = ath9k_hw_gettsf64(ah); + } + + if (ah && ah->ptp_wrap_rebase) { + s64 offset = atomic64_read(&ah->ptp_tsf_offset); + s64 rem = atomic64_read(&ah->ptp_tsf_offset_ns_rem); + u64 phc_prev = (u64)((s64)tsf_old + offset); + + WRITE_ONCE(ah->ptp_rebase_phc, phc_prev); + WRITE_ONCE(ah->ptp_rebase_phc_rem_ns, rem); + WRITE_ONCE(ah->ptp_rebase_anchor_valid, true); + } + REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); + if (ah && (ah->ptp_wrap_rebase || ah->ptp_resettsf_log)) + tsf_read = ath9k_hw_gettsf64(ah); + + if (ah && ah->ptp_resettsf_log) + ah->ptp_resettsf_log(ah, tsf_old, tsf_read); + + if (ah && ah->ptp_wrap_rebase) { + ah->ptp_wrap_rebase(ah, tsf_read); + } } EXPORT_SYMBOL(ath9k_hw_reset_tsf); @@ -3108,6 
+3490,9 @@ static const struct ath_gen_timer_configuration gen_tmr_configuration[] = u32 ath9k_hw_gettsf32(struct ath_hw *ah) { + if (ah) + atomic64_inc(&ah->ptp_tsf_get32_cnt); + return REG_READ(ah, AR_TSF_L32); } EXPORT_SYMBOL(ath9k_hw_gettsf32); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 69f30ebccf29..0c98fa3f7616 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -236,8 +236,7 @@ enum ath_hw_txq_subtype { ATH_TXQ_AC_BK = 0, ATH_TXQ_AC_BE = 1, - // focus: wiptp - ATH_TXQ_AC_VI = 5, // FIXME: wiptp -> 5 / default -> 2 + ATH_TXQ_AC_VI = 5, ATH_TXQ_AC_VO = 3, }; @@ -779,6 +778,33 @@ struct ath_hw { struct device *dev; struct ieee80211_hw *hw; struct ath_common common; + /* Optional callback for PTP wrap rebase on TSF set. */ + void (*ptp_wrap_rebase)(struct ath_hw *ah, u64 tsf); + /* Optional callback to log TSF set operations. */ + void (*ptp_settsf_log)(struct ath_hw *ah, u64 tsf_old, u64 tsf_new); + /* Optional callback to log TSF reset operations. */ + void (*ptp_resettsf_log)(struct ath_hw *ah, u64 tsf_old, u64 tsf_new); + /* Offset to keep PHC monotonic across TSF steps. */ + atomic64_t ptp_tsf_offset; + atomic64_t ptp_tsf_offset_ns_rem; + atomic64_t ptp_settsf_dur_ns; + u64 ptp_settsf_read_start_ns; + u64 ptp_settsf_read_end_ns; + u64 ptp_settsf_write_start_ns; + u64 ptp_settsf_write_end_ns; + u32 ptp_settsf_read_ns; + u32 ptp_settsf_write_ns; + u32 ptp_settsf_total_ns; + atomic64_t ptp_tsf_get64_cnt; + atomic64_t ptp_tsf_get32_cnt; + atomic64_t ptp_tsf_set_cnt; + atomic64_t ptp_tsf_reset_cnt; + atomic64_t ptp_tsf_cc_read_cnt; + atomic64_t ptp_tsf_aic_read_cnt; + /* PHC anchor captured before a TSF rebase. 
*/ + u64 ptp_rebase_phc; + s64 ptp_rebase_phc_rem_ns; + bool ptp_rebase_anchor_valid; struct ath9k_hw_version hw_version; struct ath9k_ops_config config; struct ath9k_hw_capabilities caps; @@ -814,6 +840,8 @@ struct ath_hw { bool htc_reset_init; enum nl80211_iftype opmode; + u8 ptp_sta_hw_reset_restore_enable; + u8 ptp_hw_reset_restore_rebase_enable; enum ath9k_power_mode power_mode; s8 noise; @@ -1068,6 +1096,7 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah); void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah); u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur); +u64 ath9k_hw_get_tsf_offset64(struct timespec64 *last, struct timespec64 *cur); void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set); void ath9k_hw_init_global_settings(struct ath_hw *ah); u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index b1ce3219f2f8..8d9d1438a4e3 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "ath9k.h" @@ -80,6 +81,105 @@ int ath9k_use_msi; module_param_named(use_msi, ath9k_use_msi, int, 0444); MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible"); +static unsigned int ath9k_ptp_evtlog_len = ATH9K_PTP_EVTLOG_LEN_DEFAULT; +module_param_named(ptp_evtlog_len, ath9k_ptp_evtlog_len, uint, 0444); +MODULE_PARM_DESC(ptp_evtlog_len, + "PTP evtlog ring length (power-of-two, 0=disable)"); + +static unsigned int ath9k_ptp_tc_trace_len = ATH9K_PTP_TC_TRACE_LEN_DEFAULT; +module_param_named(ptp_tc_trace_len, ath9k_ptp_tc_trace_len, uint, 0444); +MODULE_PARM_DESC(ptp_tc_trace_len, + "PTP tc trace ring length (power-of-two, 0=disable)"); + +static u32 ath9k_ptp_sanitize_len(u32 req) +{ + if (!req) + return 0; + if (req < ATH9K_PTP_RING_MIN) + req = ATH9K_PTP_RING_MIN; + if (req > ATH9K_PTP_RING_MAX) + req = 
ATH9K_PTP_RING_MAX; + req = rounddown_pow_of_two(req); + if (req < ATH9K_PTP_RING_MIN) + req = ATH9K_PTP_RING_MIN; + return req; +} + +static u32 ath9k_ptp_alloc_ring(struct ath_softc *sc, size_t entry_size, + u32 req_len, void **out, const char *name) +{ + u32 len = req_len; + void *buf = NULL; + + *out = NULL; + if (!len) + return 0; + + while (len >= ATH9K_PTP_RING_MIN) { + buf = devm_kcalloc(sc->dev, len, entry_size, GFP_KERNEL); + if (buf) + break; + len >>= 1; + } + + if (!buf) { + dev_warn(sc->dev, + "ptp: failed to allocate %s ring (req=%u)\n", + name, req_len); + return 0; + } + + if (len != req_len) + dev_warn(sc->dev, + "ptp: %s ring reduced %u->%u entries\n", + name, req_len, len); + + *out = buf; + return len; +} + +static void ath9k_ptp_init_rings(struct ath_softc *sc) +{ + u32 req_evt = ath9k_ptp_sanitize_len(ath9k_ptp_evtlog_len); + u32 req_tc = ath9k_ptp_sanitize_len(ath9k_ptp_tc_trace_len); + u32 req_handoff = ath9k_ptp_sanitize_len(ATH9K_PTP_HANDOFF_LEN_DEFAULT); + u32 len; + + sc->ptp_evtlog = NULL; + sc->ptp_evtlog_len = 0; + sc->ptp_evtlog_mask = 0; + sc->ptp_tc_trace = NULL; + sc->ptp_tc_trace_len = 0; + sc->ptp_tc_trace_mask = 0; + sc->ptp_handoff = NULL; + sc->ptp_handoff_len = 0; + sc->ptp_handoff_mask = 0; + + len = ath9k_ptp_alloc_ring(sc, sizeof(*sc->ptp_evtlog), + req_evt, (void **)&sc->ptp_evtlog, + "evtlog"); + if (len) { + sc->ptp_evtlog_len = len; + sc->ptp_evtlog_mask = len - 1; + } + + len = ath9k_ptp_alloc_ring(sc, sizeof(*sc->ptp_tc_trace), + req_tc, (void **)&sc->ptp_tc_trace, + "tc_trace"); + if (len) { + sc->ptp_tc_trace_len = len; + sc->ptp_tc_trace_mask = len - 1; + } + + len = ath9k_ptp_alloc_ring(sc, sizeof(*sc->ptp_handoff), + req_handoff, (void **)&sc->ptp_handoff, + "handoff"); + if (len) { + sc->ptp_handoff_len = len; + sc->ptp_handoff_mask = len - 1; + } +} + bool is_ath9k_unloaded; #ifdef CONFIG_MAC80211_LEDS @@ -198,13 +298,6 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset) val = 
ioread32(sc->mem + reg_offset); spin_unlock_irqrestore(&sc->sc_serial_rw, flags); } else { - // focus: wiptp - // if (reg_offset == AR_TSF_L32) { - // printk("ath9k: ioread32 unlock L32"); - // } else if (reg_offset == AR_TSF_U32) { - // printk("ath9k: ioread32 unlock U32"); - // } - val = ioread32(sc->mem + reg_offset); } return val; @@ -734,6 +827,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, set_bit(ATH_OP_INVALID, &common->op_flags); sc->sc_ah = ah; + ath9k_ptp_init_rings(sc); sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET); sc->tx99_power = MAX_RATE_POWER + 1; init_waitqueue_head(&sc->tx_wait); @@ -1074,6 +1168,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, ah = sc->sc_ah; common = ath9k_hw_common(ah); ath9k_set_hw_capab(sc, hw); + sc->ptp_hwtstamp_tx_type = HWTSTAMP_TX_OFF; + sc->ptp_hwtstamp_rx_filter = HWTSTAMP_FILTER_NONE; /* Initialize regulatory */ error = ath_regd_init(&common->regulatory, sc->hw->wiphy, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index b7f9953467c3..77cd5c2e8eb5 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -370,7 +370,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) struct ath9k_tx_queue_info *qi; u32 cwMin, chanCwMin, value; - // focus: wiptp int hcfenabled = 0; @@ -382,13 +381,11 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); - // focus: wiptp if (q == ATH_TXQ_AC_VI) { qi->tqi_cwmin = qi->tqi_cwmax = 0; qi->tqi_aifs = 0; qi->tqi_cbrPeriod = 5000; qi->tqi_cbrOverflowLimit = 0; - //hcfenabled = 1; } if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { @@ -426,12 +423,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR | (qi->tqi_cbrOverflowLimit ? 
AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0)); - // focus: wiptp - // REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_ONE_SHOT_EN); } if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { - // focus: wiptp - // printk("ath9k: tqi set rdytime: %d\n", q); // FIXME: REG_WRITE(ah, AR_QRDYTIMECFG(q), SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | AR_Q_RDYTIMECFG_EN); @@ -511,7 +504,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) break; } - // focus: wiptp if (hcfenabled) { REG_SET_BIT(ah, AR_QMISC(q), 0x00000016); } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 410a024dbb3d..cf5c65ebbecb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -16,7 +16,7 @@ #include #include -// focus: wiptp +#include #include #include "ath9k.h" @@ -240,6 +240,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long flags; + bool primary_sta; ath9k_calculate_summary_state(sc, sc->cur_chan); ath_startrecv(sc); @@ -247,23 +248,30 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) sc->cur_chan->txpower, &sc->cur_chan->cur_txpower); clear_bit(ATH_OP_HW_RESET, &common->op_flags); + primary_sta = ah->opmode == NL80211_IFTYPE_STATION && + test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); if (!sc->cur_chan->offchannel && start) { - /* restore per chanctx TSF timer */ - if (sc->cur_chan->tsf_val) { - u32 offset; - - offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, - NULL); - ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset); + /* + * Managed STA syncs its TSF from AP beacons after reset. Do + * not replay a synthetic chanctx snapshot before that beacon + * sync, otherwise PTP can preserve a stale slave time base. 
+ */ + if (sc->cur_chan->tsf_val && !primary_sta) { + u64 offset; + + offset = ath9k_hw_get_tsf_offset64(&sc->cur_chan->tsf_ts, + NULL); + if (READ_ONCE(sc->ptp_settsf_enable)) + ath9k_hw_settsf64(ah, + sc->cur_chan->tsf_val + offset); } if (!test_bit(ATH_OP_BEACONS, &common->op_flags)) goto work; - if (ah->opmode == NL80211_IFTYPE_STATION && - test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) { + if (primary_sta) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); @@ -389,14 +397,12 @@ void ath9k_tasklet(struct tasklet_struct *t) u32 status; u32 rxmask; - // focus: wiptp ktime_t tstamp; spin_lock_irqsave(&sc->intr_lock, flags); status = sc->intrstatus; sc->intrstatus = 0; - // focus: wiptp tstamp = sc->intrtstamp; spin_unlock_irqrestore(&sc->intr_lock, flags); @@ -448,8 +454,6 @@ void ath9k_tasklet(struct tasklet_struct *t) */ ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); - // focus: wiptp - // sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; } spin_unlock_irqrestore(&sc->sc_pm_lock, flags); @@ -463,11 +467,9 @@ void ath9k_tasklet(struct tasklet_struct *t) /* Check for high priority Rx first */ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && (status & ATH9K_INT_RXHP)) { - // focus: wiptp ath_rx_tasklet(sc, 0, true, &tstamp); } - // focus: wiptp ath_rx_tasklet(sc, 0, false, &tstamp); } @@ -481,7 +483,6 @@ void ath9k_tasklet(struct tasklet_struct *t) */ sc->gtt_cnt = 0; - // focus: wiptp ath_tx_edma_tasklet(sc, &tstamp); } else { @@ -528,9 +529,7 @@ irqreturn_t ath_isr(int irq, void *dev) u32 sync_cause = 0; bool sched = false; - // focus: wiptp ktime_t isr_tstamp = ktime_get_real(); - // printk("ath9k: ath_isr isr_tstamp=%lld, tsf64=%u\n", isr_tstamp, ath9k_hw_gettsf32(ah)); /* @@ -572,7 +571,6 @@ irqreturn_t ath_isr(int irq, void *dev) sc->intrstatus |= status; - // focus: wiptp sc->intrtstamp = isr_tstamp; spin_unlock(&sc->intr_lock); @@ -788,8 +786,6 @@ 
static void ath9k_tx(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; unsigned long flags; - // focus: wiptp - // ath_warn(common, "ath9k_tx skb=%p, skb->sk=%p\n", skb, skb->sk); // TODO: only debug if (sc->ps_enabled) { /* @@ -1932,6 +1928,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, (changed & BSS_CHANGED_BEACON_INT) || (changed & BSS_CHANGED_BEACON_INFO)) { ath9k_calculate_summary_state(sc, avp->chanctx); + + if ((changed & BSS_CHANGED_BEACON_ENABLED) && + bss_conf->enable_beacon && + vif->type == NL80211_IFTYPE_AP) + ath9k_ptp_mode_reset(sc); } if ((avp->chanctx == sc->cur_chan) && @@ -1986,7 +1987,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) tsf = ath9k_hw_gettsf64(sc->sc_ah); } else { tsf = sc->cur_chan->tsf_val + - ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); + ath9k_hw_get_tsf_offset64(&sc->cur_chan->tsf_ts, NULL); } tsf += le64_to_cpu(avp->tsf_adjust); ath9k_ps_restore(sc); @@ -1999,12 +2000,30 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) { + struct ath_softc *sc = hw->priv; struct ath_vif *avp = (void *)vif->drv_priv; + if (!READ_ONCE(sc->ptp_settsf_enable)) + return; + mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); tsf -= le64_to_cpu(avp->tsf_adjust); + if (!ath9k_ptp_guard_allow_settsf(sc)) { + ath9k_ps_restore(sc); + mutex_unlock(&sc->mutex); + return; + } + { + u64 now_ns = ktime_get_ns(); + u64 last_evt = READ_ONCE(sc->ptp_last_event_ns); + + WRITE_ONCE(sc->ptp_settsf_last_ns, now_ns); + if (last_evt) + WRITE_ONCE(sc->ptp_settsf_last_delta_ns, + now_ns - last_evt); + } ktime_get_raw_ts64(&avp->chanctx->tsf_ts); if (sc->cur_chan == avp->chanctx) ath9k_hw_settsf64(sc->sc_ah, tsf); @@ -2021,6 +2040,17 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); + /* + * A fresh IBSS bring-up reaches reset_tsf() before the new IBSS state + * 
is published to the driver. Clear any stale PTP rebase/offset state + * first so the upcoming reset_tsf()/wrap_rebase sequence does not + * consume an offset carried over from an earlier run. + * + * IBSS merges arrive here while vif->cfg.ibss_joined is still true, so + * leave those on the normal reset_tsf rebase path. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && !vif->cfg.ibss_joined) + ath9k_ptp_mode_reset(sc); ktime_get_raw_ts64(&avp->chanctx->tsf_ts); if (sc->cur_chan == avp->chanctx) ath9k_hw_reset_tsf(sc->sc_ah); @@ -2800,6 +2830,84 @@ static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return 0; } +static int ath9k_get_ts_info(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_ts_info *info) +{ + struct ath_softc *sc = hw->priv; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + info->phc_index = sc->ptp_clock ? 
ptp_clock_index(sc->ptp_clock) : -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + return 0; +} + +static int ath9k_hwtstamp_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct kernel_hwtstamp_config *cfg) +{ + struct ath_softc *sc = hw->priv; + + cfg->flags = 0; + cfg->tx_type = sc->ptp_hwtstamp_tx_type; + cfg->rx_filter = sc->ptp_hwtstamp_rx_filter; + cfg->source = HWTSTAMP_SOURCE_NETDEV; + + return 0; +} + +static int ath9k_hwtstamp_set(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct ath_softc *sc = hw->priv; + + (void)extack; + + if (cfg->flags) + return -EINVAL; + + switch (cfg->tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (cfg->rx_filter) { + case HWTSTAMP_FILTER_NONE: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + break; + default: + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + } + + sc->ptp_hwtstamp_tx_type = cfg->tx_type; + sc->ptp_hwtstamp_rx_filter = cfg->rx_filter; + cfg->source = HWTSTAMP_SOURCE_NETDEV; + + return 0; +} + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, @@ -2847,18 +2955,214 @@ struct ieee80211_ops ath9k_ops = { .sw_scan_start = ath9k_sw_scan_start, .sw_scan_complete = ath9k_sw_scan_complete, .get_txpower = ath9k_get_txpower, + .get_ts_info = ath9k_get_ts_info, + .hwtstamp_get = ath9k_hwtstamp_get, + .hwtstamp_set = ath9k_hwtstamp_set, .wake_tx_queue = ath9k_wake_tx_queue, }; -// focus: wiptp -void ath9k_cyc2hwtstamp(struct ath_softc *sc, struct skb_shared_hwtstamps *hwtstamps, u32 cycle) { - u64 ns; - unsigned long flags; +static void ath9k_timecounter_set_cycle(struct 
timecounter *tc, u64 cycle) +{ + u64 delta = (cycle - tc->cycle_last) & tc->cc->mask; - spin_lock_irqsave(&sc->systim_lock, flags); - ns = timecounter_cyc2time(&sc->tc, (u64)cycle); - spin_unlock_irqrestore(&sc->systim_lock, flags); + if (delta > (tc->cc->mask >> 1)) { + delta = (tc->cycle_last - cycle) & tc->cc->mask; + tc->nsec -= ath9k_cc_cyc2ns_backwards_chunked(tc->cc, delta, + tc->mask, + tc->frac); + } else { + u64 frac = tc->frac; + + tc->nsec += ath9k_cc_cyc2ns_chunked(tc->cc, delta, + tc->mask, &frac); + tc->frac = frac; + } + tc->cycle_last = cycle; +} + +/* Convert a cycle value to hwtstamp and capture sample metadata. */ +void ath9k_cyc2hwtstamp_sample(struct ath_softc *sc, + struct skb_shared_hwtstamps *hwtstamps, + u64 cycle, + struct ath9k_ptp_sample *sample) +{ + u64 cycle_adj = cycle; + u64 ns; + u64 tc_cycle_last; + u64 tc_nsec; + u64 tc_frac; + u64 adj_seq; + u64 prev_ns; + u64 prev_tc_nsec; + u64 rebase_cnt; + u64 tsf64 = 0; + u64 anchor_cycle = 0; + bool use_anchor = false; + u32 sample_epoch = 0; + bool sample_epoch_valid = false; + s64 offset; + s64 offset_rem; + u8 reason; + unsigned long flags; + struct cyclecounter cc_snap; + struct timecounter tc_snap; + + spin_lock_irqsave(&sc->systim_lock, flags); + cc_snap = sc->cc; + tc_snap = sc->tc; + tc_snap.cc = &cc_snap; + if (sample) { + sample_epoch = READ_ONCE(sample->epoch); + sample_epoch_valid = READ_ONCE(sample->epoch_valid); + } + if (sample_epoch_valid && sample_epoch != sc->ptp_epoch) { + sc->ptp_epoch_drop_cnt++; + tc_cycle_last = tc_snap.cycle_last; + tc_nsec = tc_snap.nsec; + tc_frac = tc_snap.frac; + adj_seq = sc->ptp_adj_seq; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + if (sample) { + WRITE_ONCE(sample->cycle, cycle); + WRITE_ONCE(sample->ns, 0); + WRITE_ONCE(sample->tc_cycle_last, tc_cycle_last); + WRITE_ONCE(sample->tc_nsec, tc_nsec); + WRITE_ONCE(sample->tc_frac, tc_frac); + WRITE_ONCE(sample->adj_seq, adj_seq); + } + 
return; + } + if (sample) { + tsf64 = READ_ONCE(sample->tsf64); + use_anchor = (sample == &sc->ptp_sample_rx || + sample == &sc->ptp_sample_tx); + if (READ_ONCE(sample->epoch_valid)) { + offset = READ_ONCE(sample->tsf_offset); + offset_rem = READ_ONCE(sample->tsf_offset_ns_rem); + } else { + offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + WRITE_ONCE(sample->tsf_offset, offset); + WRITE_ONCE(sample->tsf_offset_ns_rem, offset_rem); + } + } else { + offset = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + } + if (offset) { + s64 tmp = (s64)cycle + offset; + + if (tmp < 0) { + cycle_adj = tc_snap.cycle_last; + use_anchor = false; + } else { + cycle_adj = (u64)tmp; + } + } + reason = ATH9K_PTP_TC_TRACE_RSN_NONE; + if (sample == &sc->ptp_sample_rx) + reason = ATH9K_PTP_TC_TRACE_RSN_RX; + else if (sample == &sc->ptp_sample_tx) + reason = ATH9K_PTP_TC_TRACE_RSN_TX; - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ns); + if (use_anchor && tsf64) { + s64 tmp = (s64)tsf64 + offset; + + if (tmp < 0) + anchor_cycle = tc_snap.cycle_last; + else + anchor_cycle = (u64)tmp; + { + u64 cycle_last = tc_snap.cycle_last; + u64 delta = (anchor_cycle - cycle_last) & tc_snap.cc->mask; + bool backward = delta > (tc_snap.cc->mask >> 1); + u64 delta_use; + u64 frac_local = tc_snap.frac; + u64 ns_offset; + u64 ns_out; + + if (backward) { + delta_use = (cycle_last - anchor_cycle) & tc_snap.cc->mask; + ns_offset = ath9k_cc_cyc2ns_backwards_chunked(tc_snap.cc, + delta_use, + tc_snap.mask, + frac_local); + ns_out = tc_snap.nsec - ns_offset; + } else { + delta_use = delta; + ns_offset = ath9k_cc_cyc2ns_chunked(tc_snap.cc, + delta_use, + tc_snap.mask, + &frac_local); + ns_out = tc_snap.nsec + ns_offset; + } + + ath9k_ptp_tc_trace_add(sc, ATH9K_PTP_TC_TRACE_SET_CYCLE, + reason, backward ? 
1 : 0, + anchor_cycle, cycle_last, + tc_snap.nsec, tc_snap.frac, + delta, ns_offset, ns_out); + } + ath9k_timecounter_set_cycle(&tc_snap, anchor_cycle); + } + ns = timecounter_cyc2time(&tc_snap, cycle_adj); + if (offset_rem) { + s64 ns_signed = (s64)ns + offset_rem; + + if (ns_signed < 0) + ns = 0; + else + ns = (u64)ns_signed; + } + tc_cycle_last = tc_snap.cycle_last; + tc_nsec = tc_snap.nsec; + tc_frac = tc_snap.frac; + adj_seq = sc->ptp_adj_seq; + prev_ns = READ_ONCE(sc->ptp_hwt_last_ns); + prev_tc_nsec = READ_ONCE(sc->ptp_hwt_last_tc_nsec); + rebase_cnt = sample ? READ_ONCE(sample->rebase_cnt) : + READ_ONCE(sc->ptp_wrap_rebase_cnt); + if (prev_ns && ns < prev_ns) + ath9k_ptp_hwt_anomaly_record(sc, reason, prev_ns, ns, + prev_tc_nsec, tc_nsec, + cycle, cycle_adj, + tc_cycle_last, tsf64, + offset, offset_rem, + sample_epoch_valid ? + sample_epoch : + READ_ONCE(sc->ptp_epoch), + rebase_cnt); + WRITE_ONCE(sc->ptp_hwt_last_ns, ns); + WRITE_ONCE(sc->ptp_hwt_last_tc_nsec, tc_nsec); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + { + ath9k_ptp_tc_trace_cyc2time(sc, cycle_adj, tc_cycle_last, + tc_nsec, tc_frac, ns, reason); + } + + ath9k_ptp_mon_update(&sc->ptp_mon.phc_ns, ns); + if (sample) { + if (sample == &sc->ptp_sample_rx) + sc->ptp_rx_hwts_cnt++; + WRITE_ONCE(sample->cycle, cycle_adj); + WRITE_ONCE(sample->ns, ns); + WRITE_ONCE(sample->tc_cycle_last, tc_cycle_last); + WRITE_ONCE(sample->tc_nsec, tc_nsec); + WRITE_ONCE(sample->tc_frac, tc_frac); + WRITE_ONCE(sample->adj_seq, adj_seq); + } + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/* Convert a cycle value to hwtstamp without recording a sample. 
*/ +void ath9k_cyc2hwtstamp(struct ath_softc *sc, + struct skb_shared_hwtstamps *hwtstamps, + u64 cycle) +{ + ath9k_cyc2hwtstamp_sample(sc, hwtstamps, cycle, NULL); } diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 5eb8d327b674..25683fb7237f 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -18,9 +18,9 @@ #include #include +#include #include -// focus: wiptp #include #include "ath9k.h" @@ -887,66 +887,122 @@ static const struct ath_bus_ops ath_pci_bus_ops = { .aspm_init = ath_pci_aspm_init, }; -// focus: wiptp start -static u64 ath9k_cyclecounter_read(const struct cyclecounter *cc) { - struct ath_softc *sc = container_of(cc, struct ath_softc, cc); - return ath9k_hw_gettsf32(sc->sc_ah); -} +#define ATH9K_CC_TSF_READ_MAX 10 +/* Read TSF64 and apply PTP offset, tracking regressions for debugfs. */ +static u64 ath9k_cyclecounter_read(const struct cyclecounter *cc) +{ + struct ath_softc *sc = container_of(cc, struct ath_softc, cc); + struct ath_hw *ah = sc->sc_ah; + s64 offset = atomic64_read(&ah->ptp_tsf_offset); + u32 hi1; + u32 lo; + u32 hi2; + u64 tsf64; + u64 val; + u64 last; + u64 last_raw; + u64 thresh_ns; + u64 host_ns; + s64 raw_delta; + s64 cc_delta; + s64 offset_delta; + s64 delta; + s64 phc; + int i; + + atomic64_inc(&ah->ptp_tsf_cc_read_cnt); + + hi1 = REG_READ(ah, AR_TSF_U32); + for (i = 0; i < ATH9K_CC_TSF_READ_MAX; i++) { + lo = REG_READ(ah, AR_TSF_L32); + hi2 = REG_READ(ah, AR_TSF_U32); + if (hi2 == hi1) + break; + hi1 = hi2; + } + + tsf64 = ((u64)hi2 << 32) | lo; + last = READ_ONCE(sc->ptp_cc_last); + last_raw = READ_ONCE(sc->ptp_cc_last_raw); + phc = (s64)tsf64 + offset; + if (unlikely(phc < 0)) { + /* Avoid unsigned wrap when offset underflows below zero. 
*/ + delta = phc - (s64)last; + WRITE_ONCE(sc->ptp_cc_glitch_cnt, + READ_ONCE(sc->ptp_cc_glitch_cnt) + 1); + WRITE_ONCE(sc->ptp_cc_glitch_ts_ns, ktime_get_ns()); + WRITE_ONCE(sc->ptp_cc_glitch_last, last); + WRITE_ONCE(sc->ptp_cc_glitch_new, last); + WRITE_ONCE(sc->ptp_cc_glitch_delta, delta); + WRITE_ONCE(sc->ptp_cc_glitch_raw, tsf64); + WRITE_ONCE(sc->ptp_cc_glitch_offset, offset); + WRITE_ONCE(sc->ptp_cc_glitch_hi1, hi1); + WRITE_ONCE(sc->ptp_cc_glitch_lo, lo); + WRITE_ONCE(sc->ptp_cc_glitch_hi2, hi2); + WRITE_ONCE(sc->ptp_cc_glitch_pid, 0); + sc->ptp_cc_glitch_comm[0] = '\0'; + WRITE_ONCE(sc->ptp_cc_glitch_valid, 1); + ath9k_ptp_evtlog_cc_glitch(sc, tsf64, hi1, lo, hi2, + offset, last, last, delta); + val = last; + goto out_update; + } + val = (u64)phc; + thresh_ns = READ_ONCE(sc->ptp_tc_anomaly_thresh_ns); + if (thresh_ns && last && last_raw && tsf64 >= last_raw && val >= last) { + raw_delta = (s64)tsf64 - (s64)last_raw; + cc_delta = (s64)val - (s64)last; + offset_delta = cc_delta - raw_delta; + if (offset_delta > (s64)thresh_ns || + offset_delta < -(s64)thresh_ns) { + host_ns = ktime_get_ns(); + ath9k_ptp_tc_anomaly_record(sc, + ATH9K_PTP_TC_ANOM_SRC_CC, + ATH9K_PTP_TC_TRACE_RSN_NONE, + host_ns, host_ns, 0, 0, + val, last, + tsf64, last_raw, + offset, offset_delta, 0); + } + } + if (last && val < last) { + delta = (s64)val - (s64)last; + WRITE_ONCE(sc->ptp_cc_glitch_cnt, + READ_ONCE(sc->ptp_cc_glitch_cnt) + 1); + WRITE_ONCE(sc->ptp_cc_glitch_ts_ns, ktime_get_ns()); + WRITE_ONCE(sc->ptp_cc_glitch_last, last); + WRITE_ONCE(sc->ptp_cc_glitch_new, val); + WRITE_ONCE(sc->ptp_cc_glitch_delta, delta); + WRITE_ONCE(sc->ptp_cc_glitch_raw, tsf64); + WRITE_ONCE(sc->ptp_cc_glitch_offset, offset); + WRITE_ONCE(sc->ptp_cc_glitch_hi1, hi1); + WRITE_ONCE(sc->ptp_cc_glitch_lo, lo); + WRITE_ONCE(sc->ptp_cc_glitch_hi2, hi2); + WRITE_ONCE(sc->ptp_cc_glitch_pid, 0); + sc->ptp_cc_glitch_comm[0] = '\0'; + WRITE_ONCE(sc->ptp_cc_glitch_valid, 1); + ath9k_ptp_evtlog_cc_glitch(sc, 
tsf64, hi1, lo, hi2, + offset, last, val, delta); + /* + * The timecounter core assumes the underlying cyclecounter is + * monotonic. Returning a regressed value here can turn a local + * TSF correction into an undefined wrap-sized jump for PHC + * readers such as phc2sys. Clamp to the last exported value and + * keep the regression only in the debug record. + */ + val = last; + } + +out_update: + WRITE_ONCE(sc->ptp_cc_last, val); + WRITE_ONCE(sc->ptp_cc_last_raw, tsf64); + WRITE_ONCE(sc->ptp_cc_last_hi1, hi1); + WRITE_ONCE(sc->ptp_cc_last_lo, lo); + WRITE_ONCE(sc->ptp_cc_last_hi2, hi2); -static enum hrtimer_restart ath_off_timer_cb(struct hrtimer *t) { - struct ath_softc *sc = container_of(t, struct ath_softc, off_timer); - struct ath_hw *ah = sc->sc_ah; - struct ptp_clock_info *ptp = &sc->ptp_clock_info; - ktime_t kt1, kt3; - u64 t1, t2, t3; - s64 offset; - struct timespec64 ts; - u64 next; - int i; - - if (sc->off_counter == 0) { - ath9k_hw_ops(ah)->config_pci_powersave(ah, false); - } - - for (i = 0; i < 1024; ++i) { - kt1 = ktime_get(); - ptp->gettime64(ptp, &ts); - kt3 = ktime_get(); - t3 = ktime_to_ns(kt3); - t1 = ktime_to_ns(kt1); - // printk("ath9k timer: %lld\n", t3 - t1); // TODO: only debug - } - - // TODO: all lines from here are never reached - return HRTIMER_NORESTART; - - if (!sc->off_base_time) { - sc->off_base_time = ktime_to_ns(ktime_get()); - } - - next = sc->off_base_time + sc->off_counter * ktime_to_ns(sc->off_interval); - hrtimer_forward(t, ns_to_ktime(next), sc->off_interval); - - if (sc->off_counter++ >= 1024) { - return HRTIMER_NORESTART; - } - - t2 = ath9k_cyclecounter_read(&sc->cc); - - offset = t2 - sc->off_last; - - /* - if (t3 - t1 > 3000) { - return HRTIMER_RESTART; - } - */ - - // printk("ath9k: tsf value = %lld, diff=%lld\n", (s64)t2, offset); // TODO: only debug - sc->off_last = t2; - - return HRTIMER_RESTART; + return val; } -// focus: wiptp end + static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ 
-1056,26 +1112,28 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", hw_name, sc->mem, pdev->irq); - // focus: wiptp start sc->cc.read = ath9k_cyclecounter_read; - sc->cc.mask = CYCLECOUNTER_MASK(32); - sc->cc.shift = ATH9K_PTP_FAKE_SHIFT; + sc->cc.mask = CYCLECOUNTER_MASK(64); + /* TSF ticks are 1 MHz (1 us). Use exact 1:1000 scaling to ns. */ + sc->cc.shift = 21; sc->cc_mult = clocksource_khz2mult(1000, sc->cc.shift); sc->cc.mult = sc->cc_mult; spin_lock_init(&sc->systim_lock); + atomic64_set(&sc->tsf64_last, ath9k_hw_gettsf32(sc->sc_ah)); + + { + u64 init_ns = ktime_get_clocktai_ns(); + + timecounter_init(&sc->tc, &sc->cc, init_ns); + ath9k_ptp_init(sc); + ath9k_ptp_tc_mutation_record(sc, ATH9K_PTP_TC_MUT_INIT, + init_ns, 0, sc->cc.shift, + 0, 0, 0, + sc->tc.nsec, + sc->tc.cycle_last, + sc->tc.frac); + } - timecounter_init(&sc->tc, &sc->cc, ktime_to_ns(ktime_get_real())); - - ath9k_ptp_init(sc); - - sc->off_last = 0; - sc->off_interval = ktime_set(0, 1000000); - sc->off_base_time = 0; - sc->off_counter = 0; - hrtimer_init(&sc->off_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - sc->off_timer.function = &ath_off_timer_cb; - // hrtimer_start(&sc->off_timer, ktime_set(30, 0), HRTIMER_MODE_REL); - // focus: wiptp end return 0; @@ -1091,8 +1149,6 @@ static void ath_pci_remove(struct pci_dev *pdev) struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; - // focus: wiptp - hrtimer_cancel(&sc->off_timer); ath9k_ptp_remove(sc); if (!is_ath9k_unloaded) diff --git a/drivers/net/wireless/ath/ath9k/ptp.c b/drivers/net/wireless/ath/ath9k/ptp.c index e8f5fb7ba211..bec981fb7a9d 100644 --- a/drivers/net/wireless/ath/ath9k/ptp.c +++ b/drivers/net/wireless/ath/ath9k/ptp.c @@ -3,73 +3,395 @@ #include #include #include +#include +#include +#include -// focus: //FIME: check delta (s32) vs. scaled_ppm (long) handling +/* Capture a short stack trace for settsf/reset/adj events. 
*/ +static void ath9k_ptp_capture_stack(struct ath_softc *sc, u8 event, s64 arg) +{ + unsigned long flags; + unsigned long entries[ATH9K_PTP_STACK_DEPTH]; + u32 len; + + if (!READ_ONCE(sc->ptp_stack_enable)) + return; + + len = stack_trace_save(entries, ATH9K_PTP_STACK_DEPTH, 2); + + spin_lock_irqsave(&sc->systim_lock, flags); + sc->ptp_last_stack_len = len; + sc->ptp_last_stack_event = event; + sc->ptp_last_stack_arg = arg; + sc->ptp_last_stack_ns = ktime_get_ns(); + sc->ptp_last_stack_pid = 0; + sc->ptp_last_stack_comm[0] = '\0'; + if (len) + memcpy(sc->ptp_last_stack_entries, entries, + len * sizeof(entries[0])); + spin_unlock_irqrestore(&sc->systim_lock, flags); +} + +/* Log settsf events and capture call stacks when enabled. */ +static void ath9k_ptp_settsf_log_cb(struct ath_hw *ah, u64 tsf_old, u64 tsf_new) +{ + struct ath_softc *sc; + s64 delta_tsf; + s64 delta_ns; + + if (!ah || !ah->hw) + return; + sc = ah->hw->priv; + if (!sc) + return; + + delta_tsf = (s64)tsf_new - (s64)tsf_old; + delta_ns = delta_tsf * 1000; /* TSF ticks are 1us. */ + WRITE_ONCE(sc->ptp_settsf_cnt, READ_ONCE(sc->ptp_settsf_cnt) + 1); + WRITE_ONCE(sc->ptp_settsf_last_jump_ns, delta_ns); + + ath9k_ptp_capture_stack(sc, ATH9K_PTP_EVT_SETTSF, + delta_tsf); + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_settsf(sc, tsf_old, tsf_new); +} + +/* Log TSF reset events and capture call stacks when enabled. */ +static void ath9k_ptp_resettsf_log_cb(struct ath_hw *ah, u64 tsf_old, u64 tsf_new) +{ + struct ath_softc *sc; + + if (!ah || !ah->hw) + return; + sc = ah->hw->priv; + if (!sc) + return; + + ath9k_ptp_capture_stack(sc, ATH9K_PTP_EVT_RESET_TSF, + (s64)tsf_new - (s64)tsf_old); + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_resettsf(sc, tsf_old, tsf_new); +} + +/* Read timecounter with trace and anomaly bookkeeping. 
*/ +u64 ath9k_ptp_tc_read(struct ath_softc *sc, u8 reason) +{ + struct timecounter *tc = &sc->tc; + u64 cycle_now; + u64 cycle_last; + u64 nsec; + u64 frac; + u64 delta; + u64 ns_offset; + u64 frac_local; + u64 ns_out; + u64 host_ns; + u64 host_last_ns; + u64 ns_last; + u64 raw_now; + u64 raw_last; + u64 thresh_ns; + s64 host_delta_ns; + s64 ns_delta_ns; + s64 mismatch_ns; + s64 rem_ns; + s64 offset_ns; + bool trace_enable; + bool check_progress; + + lockdep_assert_held(&sc->systim_lock); + + rem_ns = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + offset_ns = atomic64_read(&sc->sc_ah->ptp_tsf_offset) * + (s64)NSEC_PER_USEC + rem_ns; + trace_enable = READ_ONCE(sc->ptp_tc_trace_enable); + check_progress = reason == ATH9K_PTP_TC_TRACE_RSN_NONE || + reason == ATH9K_PTP_TC_TRACE_RSN_RX || + reason == ATH9K_PTP_TC_TRACE_RSN_TX || + reason == ATH9K_PTP_TC_TRACE_RSN_GETTIME; + + cycle_now = tc->cc->read(tc->cc); + cycle_last = tc->cycle_last; + nsec = tc->nsec; + frac = tc->frac; + delta = (cycle_now - cycle_last) & tc->cc->mask; + if (delta > (tc->cc->mask >> 1)) { + u64 delta_use = (cycle_last - cycle_now) & tc->cc->mask; + + frac_local = frac; + ns_offset = ath9k_cc_cyc2ns_chunked(tc->cc, delta_use, + tc->mask, &frac_local); + ns_out = nsec; + host_ns = ktime_get_ns(); + if (check_progress) { + raw_now = READ_ONCE(sc->ptp_cc_last_raw); + raw_last = READ_ONCE(sc->ptp_tc_last_raw); + ath9k_ptp_tc_anomaly_record(sc, ATH9K_PTP_TC_ANOM_SRC_TC, + reason, host_ns, + READ_ONCE(sc->ptp_tc_last_host_ns), + ns_out, + READ_ONCE(sc->ptp_tc_last_ns), + cycle_now, cycle_last, + raw_now, raw_last, + offset_ns, -(s64)ns_offset, + ns_offset); + } + if (trace_enable) + ath9k_ptp_tc_trace_add(sc, ATH9K_PTP_TC_TRACE_READ, + reason, 1, cycle_now, cycle_last, + nsec, frac, delta, ns_offset, + ns_out); + WRITE_ONCE(sc->ptp_tc_last_host_ns, host_ns); + WRITE_ONCE(sc->ptp_tc_last_ns, ns_out); + WRITE_ONCE(sc->ptp_tc_last_raw, READ_ONCE(sc->ptp_cc_last_raw)); + goto out_apply_rem; + } + 
frac_local = frac; + ns_offset = ath9k_cc_cyc2ns_chunked(tc->cc, delta, tc->mask, + &frac_local); + ns_out = nsec + ns_offset; + + tc->cycle_last = cycle_now; + tc->nsec = ns_out; + tc->frac = frac_local; + + host_ns = ktime_get_ns(); + if (check_progress) { + thresh_ns = READ_ONCE(sc->ptp_tc_anomaly_thresh_ns); + host_last_ns = READ_ONCE(sc->ptp_tc_last_host_ns); + ns_last = READ_ONCE(sc->ptp_tc_last_ns); + raw_now = READ_ONCE(sc->ptp_cc_last_raw); + raw_last = READ_ONCE(sc->ptp_tc_last_raw); + if (thresh_ns && host_last_ns && ns_last && host_ns > host_last_ns) { + host_delta_ns = (s64)(host_ns - host_last_ns); + ns_delta_ns = (s64)(ns_out - ns_last); + mismatch_ns = ns_delta_ns - host_delta_ns; + if (mismatch_ns > (s64)thresh_ns || + mismatch_ns < -(s64)thresh_ns) + ath9k_ptp_tc_anomaly_record(sc, + ATH9K_PTP_TC_ANOM_SRC_TC, + reason, host_ns, + host_last_ns, + ns_out, ns_last, + cycle_now, + cycle_last, + raw_now, raw_last, + offset_ns, mismatch_ns, + ns_offset); + } + } + if (trace_enable) + ath9k_ptp_tc_trace_add(sc, ATH9K_PTP_TC_TRACE_READ, + reason, 0, cycle_now, cycle_last, + nsec, frac, delta, ns_offset, ns_out); + WRITE_ONCE(sc->ptp_tc_last_host_ns, host_ns); + WRITE_ONCE(sc->ptp_tc_last_ns, ns_out); + WRITE_ONCE(sc->ptp_tc_last_raw, READ_ONCE(sc->ptp_cc_last_raw)); + +out_apply_rem: + if (rem_ns) { + s64 ns_signed = (s64)ns_out + rem_ns; + + if (ns_signed < 0) + return 0; + return (u64)ns_signed; + } + + return ns_out; +} + +/* Apply a frequency adjustment to the PHC timecounter. 
*/ static int ath9k_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct ath_softc *sc = container_of(ptp, struct ath_softc, ptp_clock_info); unsigned long flags; + u64 ext; + u64 tsf; spin_lock_irqsave(&sc->systim_lock, flags); - timecounter_read(&sc->tc); // FIXME: maybe obsolete + sc->ptp_last_adjfine_pre_mult = sc->cc.mult; + ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_ADJFINE); /* advance tc so the new mult applies only from now on */ + sc->ptp_last_adjfine = scaled_ppm; + sc->ptp_last_adjfine_ns = ktime_get_ns(); sc->cc.mult = adjust_by_scaled_ppm((u64)sc->cc_mult, scaled_ppm); + sc->ptp_last_adjfine_post_mult = sc->cc.mult; spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (READ_ONCE(sc->ptp_evtlog_enable)) { + tsf = ath9k_hw_gettsf64(sc->sc_ah); + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_ADJFINE, + tsf, 0, 0, ext, 0, NULL, NULL); + } + + ath9k_ptp_capture_stack(sc, ATH9K_PTP_EVT_ADJFINE, (s64)scaled_ppm); - // ath_warn(ath9k_hw_common(sc->sc_ah), "phc adjust adj=%llu freq=%u\n", adj, diff); // TODO: only debug FIXME: deprecated - // printk("ath9k: phc adjfine: scaled_ppm = %ld | sc->cc.mult = %lu", scaled_ppm, sc->cc.mult); // tb350: debug return 0; } +/* Apply a step adjustment to the PHC timecounter. 
*/ static int ath9k_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct ath_softc *sc = container_of(ptp, struct ath_softc, ptp_clock_info); unsigned long flags; + u64 pre_ns; + u64 post_ns; + u64 pre_tc_nsec; + u64 post_tc_nsec; + u64 pre_cycle_last; + u64 post_cycle_last; + u64 pre_frac; + u64 post_frac; + s64 delta_ns; + u64 ext; + u64 tsf; spin_lock_irqsave(&sc->systim_lock, flags); + sc->ptp_adj_seq++; + pre_ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_ADJTIME_PRE); + sc->ptp_last_adjtime_pre_ns = pre_ns; + sc->ptp_last_adjtime_cycle_last = sc->tc.cycle_last; + pre_tc_nsec = sc->tc.nsec; + pre_cycle_last = sc->tc.cycle_last; + pre_frac = sc->tc.frac; + sc->ptp_last_adjtime_expected_ns = (s64)pre_ns + delta; + sc->ptp_last_adjtime_cycle = sc->cc.read(&sc->cc); + sc->ptp_last_adjtime = delta; + sc->ptp_last_adjtime_ns = ktime_get_ns(); timecounter_adjtime(&sc->tc, delta); + post_ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_ADJTIME_POST); + post_tc_nsec = sc->tc.nsec; + post_cycle_last = sc->tc.cycle_last; + post_frac = sc->tc.frac; + sc->ptp_last_adjtime_post_ns = post_ns; + ath9k_ptp_tc_mutation_record(sc, ATH9K_PTP_TC_MUT_ADJTIME, + pre_ns + delta, delta, 0, + pre_tc_nsec, pre_cycle_last, pre_frac, + post_tc_nsec, post_cycle_last, post_frac); spin_unlock_irqrestore(&sc->systim_lock, flags); + delta_ns = (s64)post_ns - (s64)pre_ns; + if (READ_ONCE(sc->ptp_evtlog_enable)) { + tsf = ath9k_hw_gettsf64(sc->sc_ah); + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_ADJTIME, + tsf, 0, 0, ext, 0, NULL, NULL); + } + + ath9k_ptp_capture_stack(sc, ATH9K_PTP_EVT_ADJTIME, delta); + ath_info(ath9k_hw_common(sc->sc_ah), + "ptp adjtime: delta=%lld pre=%llu post=%llu applied=%lld expected=%lld\n", + (long long)delta, + (unsigned long long)pre_ns, + (unsigned long long)post_ns, + (long long)delta_ns, + (long long)((s64)pre_ns + delta)); - // 
ath_warn(ath9k_hw_common(sc->sc_ah), "phc adjust abs: %lld\n", delta); // TODO: only debug - // printk("ath9k: phc adjtime: delta = %lld\n", delta); // tb350: debug return 0; } +/* Return current PHC time, optionally logging it. */ static int ath9k_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ath_softc *sc = container_of(ptp, struct ath_softc, ptp_clock_info); unsigned long flags; + struct ath9k_ptp_sample sample; u64 ns; + u64 ext; + u64 tsf; spin_lock_irqsave(&sc->systim_lock, flags); - ns = timecounter_read(&sc->tc); + ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_GETTIME); + sample.cycle = sc->tc.cycle_last; + sample.ns = ns; + sample.tc_cycle_last = sc->tc.cycle_last; + sample.tc_nsec = sc->tc.nsec; + sample.tc_frac = sc->tc.frac; + sample.adj_seq = sc->ptp_adj_seq; spin_unlock_irqrestore(&sc->systim_lock, flags); + if (READ_ONCE(sc->ptp_evtlog_enable) && + READ_ONCE(sc->ptp_evtlog_phc)) { + tsf = ath9k_hw_gettsf64(sc->sc_ah); + ext = READ_ONCE(sc->ptp_rx_wrap_ext); + if (READ_ONCE(sc->ptp_tx_wrap_ext) > ext) + ext = READ_ONCE(sc->ptp_tx_wrap_ext); + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_PHC_GET, + tsf, 0, 0, ext, ns, &sample, NULL); + } + *ts = ns_to_timespec64(ns); return 0; } +/* Set PHC time by applying a signed delta to the timecounter. 
*/ static int ath9k_phc_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct ath_softc *sc = container_of(ptp, struct ath_softc, ptp_clock_info); unsigned long flags; + u64 old_ns; u64 ns; + u64 post_ns; + u64 pre_tc_nsec; + u64 post_tc_nsec; + u64 pre_cycle_last; + u64 post_cycle_last; + u64 pre_frac; + u64 post_frac; + s64 delta; + u64 tsf; ns = timespec64_to_ns(ts); spin_lock_irqsave(&sc->systim_lock, flags); - timecounter_init(&sc->tc, &sc->cc, ns); + old_ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_SETTIME_PRE); + delta = (s64)ns - (s64)old_ns; + sc->ptp_adj_seq++; + sc->ptp_last_adjtime_pre_ns = old_ns; + sc->ptp_last_adjtime_cycle_last = sc->tc.cycle_last; + pre_tc_nsec = sc->tc.nsec; + pre_cycle_last = sc->tc.cycle_last; + pre_frac = sc->tc.frac; + sc->ptp_last_adjtime_expected_ns = (s64)old_ns + delta; + sc->ptp_last_adjtime_cycle = sc->cc.read(&sc->cc); + sc->ptp_last_adjtime = delta; + sc->ptp_last_adjtime_ns = ktime_get_ns(); + timecounter_adjtime(&sc->tc, delta); + post_ns = ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_SETTIME_POST); + post_tc_nsec = sc->tc.nsec; + post_cycle_last = sc->tc.cycle_last; + post_frac = sc->tc.frac; + sc->ptp_last_adjtime_post_ns = post_ns; + ath9k_ptp_tc_mutation_record(sc, ATH9K_PTP_TC_MUT_SETTIME, + ns, delta, 0, + pre_tc_nsec, pre_cycle_last, pre_frac, + post_tc_nsec, post_cycle_last, post_frac); spin_unlock_irqrestore(&sc->systim_lock, flags); + if (READ_ONCE(sc->ptp_evtlog_enable)) { + tsf = ath9k_hw_gettsf64(sc->sc_ah); + ath9k_ptp_evtlog_settime(sc, tsf, old_ns, ns); + } + + ath9k_ptp_capture_stack(sc, ATH9K_PTP_EVT_SETTIME, delta); + return 0; } +/* PTP clock enable hook (unsupported). */ static int ath9k_phc_enable(struct ptp_clock_info __always_unused *ptp, struct ptp_clock_request __always_unused *request, int __always_unused on) { return -EOPNOTSUPP; } +/* PTP clock operations registered with the kernel. 
*/ static const struct ptp_clock_info ath9k_ptp_clock_info = { .owner = THIS_MODULE, .name = "ath9k ptp", @@ -86,31 +408,873 @@ static const struct ptp_clock_info ath9k_ptp_clock_info = { .enable = ath9k_phc_enable, }; +/* Forward HW rebase callback to the driver wrapper. */ +static void ath9k_ptp_wrap_rebase_cb(struct ath_hw *ah, u64 tsf) +{ + struct ath_softc *sc; + + if (!ah || !ah->hw) + return; + sc = ah->hw->priv; + if (sc) + ath9k_ptp_wrap_rebase(sc, tsf); +} + +#define ATH9K_PTP_REBASE_GUARD_US 0x80000000ULL + +static bool ath9k_ptp_rebase_candidate_valid(u64 candidate, u64 tsf) +{ + u64 diff; + + diff = (candidate > tsf) ? (candidate - tsf) : (tsf - candidate); + + return diff <= ATH9K_PTP_REBASE_GUARD_US; +} + +static void ath9k_ptp_reset_runtime_samples(struct ath_softc *sc) +{ + memset(&sc->ptp_sample_rx, 0, sizeof(sc->ptp_sample_rx)); + memset(&sc->ptp_sample_tx, 0, sizeof(sc->ptp_sample_tx)); + sc->ptp_sync_dup_valid = 0; + sc->ptp_sync_dup_last_ext = 0; + sc->ptp_sync_dup_last_delta_us = 0; + sc->ptp_sync_dup_last_seqid = 0xFFFF; + memset(sc->ptp_sync_dup_last_src, 0, sizeof(sc->ptp_sync_dup_last_src)); + sc->ptp_last_rx_hwtstamp_ns = 0; + sc->ptp_last_rx_mactime = 0; + sc->ptp_last_rx_tsf64 = 0; + sc->ptp_last_rx_tstamp = 0; + sc->ptp_last_rx_tsf_offset = 0; + sc->ptp_last_rx_rebase = 0; + sc->ptp_last_rx_msgtype = 0xFF; + sc->ptp_last_rx_seqid = 0xFFFF; + sc->ptp_last_tx_hwtstamp_ns = 0; + sc->ptp_last_tx_tsf_offset = 0; + sc->ptp_last_tx_rebase = 0; + sc->ptp_last_tx_msgtype = 0xFF; + sc->ptp_last_tx_seqid = 0xFFFF; + sc->ptp_sta_hidden_step_last_host_ns = 0; + sc->ptp_sta_hidden_step_last_tsf = 0; + sc->ptp_sta_hidden_step_last_delta_host_ns = 0; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = 0; + sc->ptp_sta_hidden_step_last_corr_ns = 0; + sc->ptp_sta_hidden_step_last_abs_ns = 0; + sc->ptp_sta_hidden_step_last_reanchor_ns = 0; + sc->ptp_sta_hidden_step_backwards_delta_host_ns = 0; + sc->ptp_sta_hidden_step_backwards_delta_tsf_ns = 0; +} + +static 
void ath9k_ptp_reanchor_now(struct ath_softc *sc, u64 tsf, + u32 read_dur_ns, bool arm_drop_window) +{ + unsigned long flags; + s64 old_offset_us; + s64 old_offset_rem; + s64 old_offset_ns; + s64 phc_ns; + s64 offset_ns; + s64 offset_us; + s64 offset_rem; + u64 drop_until_ns = 0; + u32 drop_ns = 0; + u32 log_dur_ns = read_dur_ns; + s32 rem_ns; + + if (!sc) + return; + + spin_lock_irqsave(&sc->systim_lock, flags); + if (READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid)) { + spin_unlock_irqrestore(&sc->systim_lock, flags); + return; + } + + old_offset_us = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + old_offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + old_offset_ns = old_offset_us * (s64)NSEC_PER_USEC + old_offset_rem; + phc_ns = (s64)ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_NONE); + offset_ns = phc_ns - ((s64)tsf * NSEC_PER_USEC + read_dur_ns); + offset_us = div_s64_rem(offset_ns, NSEC_PER_USEC, &rem_ns); + offset_rem = rem_ns; + + if (arm_drop_window) { + drop_ns = READ_ONCE(sc->ptp_rebase_drop_ns); + if (drop_ns) + drop_until_ns = ktime_get_ns() + drop_ns; + } + + atomic64_set(&sc->sc_ah->ptp_tsf_offset, offset_us); + atomic64_set(&sc->sc_ah->ptp_tsf_offset_ns_rem, offset_rem); + sc->ptp_rx_wrap_last = (u32)tsf; + sc->ptp_tx_wrap_last = (u32)tsf; + sc->ptp_rx_wrap_ext = tsf; + sc->ptp_tx_wrap_ext = tsf; + sc->ptp_rx_wrap_valid = 1; + sc->ptp_tx_wrap_valid = 1; + write_seqcount_begin(&sc->ptp_epoch_seq); + sc->ptp_epoch++; + sc->ptp_epoch_offset_us = offset_us; + sc->ptp_epoch_offset_rem = offset_rem; + sc->ptp_epoch_tsf = tsf; + write_seqcount_end(&sc->ptp_epoch_seq); + sc->ptp_rx_wrap_epoch = sc->ptp_epoch; + sc->ptp_tx_wrap_epoch = sc->ptp_epoch; + sc->ptp_wrap_rebase_tsf = tsf; + sc->ptp_rebase_drop_until_ns = drop_until_ns; + sc->ptp_rebase_old_offset_ns = old_offset_ns; + sc->ptp_rebase_new_offset_ns = offset_ns; + sc->ptp_rebase_anchor_rem_ns = 0; + sc->ptp_guard_inflight = 0; + sc->ptp_guard_until_ns = 0; + 
ath9k_ptp_reset_runtime_samples(sc); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_REBASE, + tsf, 0, log_dur_ns, tsf, 0, NULL, NULL); +} + +static bool ath9k_ptp_infra_reanchor_guard(struct ath_softc *sc, s64 old_offset_ns, + s64 new_offset_ns) +{ + u64 delta_ns; + u32 guard_ns; + + if (!old_offset_ns) + return true; + + guard_ns = READ_ONCE(sc->ptp_infra_reanchor_guard_ns); + if (!guard_ns) + return true; + + delta_ns = (old_offset_ns > new_offset_ns) ? + (u64)(old_offset_ns - new_offset_ns) : + (u64)(new_offset_ns - old_offset_ns); + + WRITE_ONCE(sc->ptp_infra_reanchor_last_delta_ns, delta_ns); + if (delta_ns <= (u64)guard_ns) + return true; + + sc->ptp_infra_reanchor_reject_cnt++; + return false; +} + +static void ath9k_ptp_state_reset_now(struct ath_softc *sc, u64 tsf) +{ + unsigned long flags; + s64 old_offset_us; + s64 old_offset_rem; + s64 old_offset_ns; + + if (!sc) + return; + + spin_lock_irqsave(&sc->systim_lock, flags); + old_offset_us = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + old_offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + old_offset_ns = old_offset_us * (s64)NSEC_PER_USEC + old_offset_rem; + + atomic64_set(&sc->sc_ah->ptp_tsf_offset, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_offset_ns_rem, 0); + atomic64_set(&sc->sc_ah->ptp_settsf_dur_ns, 0); + WRITE_ONCE(sc->sc_ah->ptp_rebase_anchor_valid, false); + WRITE_ONCE(sc->sc_ah->ptp_rebase_phc, 0); + WRITE_ONCE(sc->sc_ah->ptp_rebase_phc_rem_ns, 0); + + sc->ptp_rx_wrap_last = (u32)tsf; + sc->ptp_tx_wrap_last = (u32)tsf; + sc->ptp_rx_wrap_ext = tsf; + sc->ptp_tx_wrap_ext = tsf; + sc->ptp_rx_wrap_valid = 1; + sc->ptp_tx_wrap_valid = 1; + + write_seqcount_begin(&sc->ptp_epoch_seq); + sc->ptp_epoch++; + sc->ptp_epoch_offset_us = 0; + sc->ptp_epoch_offset_rem = 0; + sc->ptp_epoch_tsf = tsf; + write_seqcount_end(&sc->ptp_epoch_seq); + + sc->ptp_rx_wrap_epoch = sc->ptp_epoch; + sc->ptp_tx_wrap_epoch = 
sc->ptp_epoch; + sc->ptp_wrap_rebase_cnt++; + sc->ptp_wrap_rebase_tsf = tsf; + sc->ptp_rebase_drop_until_ns = 0; + sc->ptp_rebase_old_offset_ns = old_offset_ns; + sc->ptp_rebase_new_offset_ns = 0; + sc->ptp_rebase_anchor_rem_ns = 0; + sc->ptp_guard_inflight = 0; + sc->ptp_guard_until_ns = 0; + sc->ptp_sync_dup_valid = 0; + sc->ptp_sync_dup_last_ext = 0; + sc->ptp_sync_dup_last_seqid = 0xFFFF; + memset(sc->ptp_sync_dup_last_src, 0, sizeof(sc->ptp_sync_dup_last_src)); + sc->ptp_sta_hidden_step_last_host_ns = 0; + sc->ptp_sta_hidden_step_last_tsf = 0; + sc->ptp_sta_hidden_step_last_delta_host_ns = 0; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = 0; + sc->ptp_sta_hidden_step_last_corr_ns = 0; + sc->ptp_sta_hidden_step_last_abs_ns = 0; + sc->ptp_sta_hidden_step_last_reanchor_ns = 0; + sc->ptp_sta_hidden_step_backwards_cnt = 0; + sc->ptp_sta_hidden_step_backwards_delta_host_ns = 0; + sc->ptp_sta_hidden_step_backwards_delta_tsf_ns = 0; + ath9k_ptp_reset_runtime_samples(sc); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_REBASE, + tsf, 0, 0, tsf, 0, NULL, NULL); +} + +/* Initialize PTP state and register the PHC. 
*/ void ath9k_ptp_init(struct ath_softc *sc) { - sc->ptp_clock = NULL; + int i; - sc->ptp_clock_info = ath9k_ptp_clock_info; + sc->ptp_clock = NULL; + sc->ptp_wrap_glitch_thresh = 0; + sc->ptp_rebase_drop_ns = NSEC_PER_SEC; + sc->ptp_rebase_drop_until_ns = 0; + sc->ptp_rebase_drop_cnt = 0; + sc->ptp_rebase_rx_reject_cnt = 0; + sc->ptp_rebase_tx_reject_cnt = 0; + sc->ptp_rebase_fallback_cnt = 0; + sc->ptp_guard_timeout_ms = 50; + sc->ptp_guard_after_event_ms = 50; + sc->ptp_guard_until_ns = 0; + sc->ptp_guard_seqid = 0; + sc->ptp_guard_inflight = 0; + sc->ptp_guard_block_cnt = 0; + sc->ptp_guard_timeout_cnt = 0; + sc->ptp_guard_block_last_ns = 0; + sc->ptp_sync_dup_drop_cnt = 0; + sc->ptp_sync_dup_window_us = 2000; + sc->ptp_sync_dup_last_delta_us = 0; + sc->ptp_sync_dup_last_ext = 0; + sc->ptp_sta_hidden_step_thresh_ns = 50000; + sc->ptp_infra_reanchor_guard_ns = 1000000; + sc->ptp_infra_reanchor_enable = 0; + sc->ptp_sta_hidden_step_cnt = 0; + sc->ptp_sta_hidden_step_last_host_ns = 0; + sc->ptp_sta_hidden_step_last_tsf = 0; + sc->ptp_sta_hidden_step_last_delta_host_ns = 0; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = 0; + sc->ptp_sta_hidden_step_last_corr_ns = 0; + sc->ptp_sta_hidden_step_last_abs_ns = 0; + sc->ptp_sta_hidden_step_last_reanchor_ns = 0; + sc->ptp_sta_hidden_step_backwards_cnt = 0; + sc->ptp_sta_hidden_step_backwards_delta_host_ns = 0; + sc->ptp_sta_hidden_step_backwards_delta_tsf_ns = 0; + sc->ptp_infra_reanchor_reject_cnt = 0; + sc->ptp_infra_reanchor_last_delta_ns = 0; + sc->ptp_sync_dup_last_seqid = 0xFFFF; + sc->ptp_sync_dup_valid = 0; + memset(sc->ptp_sync_dup_last_src, 0, sizeof(sc->ptp_sync_dup_last_src)); + sc->ptp_last_event_ns = 0; + sc->ptp_last_event_rx_ns = 0; + sc->ptp_last_event_tx_ns = 0; + sc->ptp_settsf_last_ns = 0; + sc->ptp_settsf_last_delta_ns = 0; + sc->ptp_settsf_cnt = 0; + sc->ptp_settsf_last_jump_ns = 0; + sc->ptp_settsf_dur_filter_enable = 1; + sc->ptp_settsf_dur_pct_over_avg = 50; + sc->ptp_settsf_dur_hist_max = 9; + 
sc->ptp_settsf_dur_drop_cnt = 0; + sc->ptp_settsf_dur_pass_cnt = 0; + sc->ptp_settsf_dur_last_ns = 0; + sc->ptp_settsf_dur_last_avg_ns = 0; + sc->ptp_settsf_dur_last_med_ns = 0; + sc->ptp_settsf_dur_last_thresh_ns = 0; + sc->ptp_settsf_dur_hist_idx = 0; + sc->ptp_settsf_dur_hist_cnt = 0; + memset(sc->ptp_settsf_dur_hist, 0, sizeof(sc->ptp_settsf_dur_hist)); + sc->ptp_settsf_write_pct_over_avg = 50; + sc->ptp_settsf_write_hist_max = 9; + sc->ptp_settsf_write_last_ns = 0; + sc->ptp_settsf_write_last_avg_ns = 0; + sc->ptp_settsf_write_last_cap_ns = 0; + sc->ptp_settsf_write_hist_idx = 0; + sc->ptp_settsf_write_hist_cnt = 0; + memset(sc->ptp_settsf_write_hist, 0, + sizeof(sc->ptp_settsf_write_hist)); + seqcount_init(&sc->ptp_epoch_seq); + sc->ptp_epoch = 0; + sc->ptp_epoch_offset_us = 0; + sc->ptp_epoch_offset_rem = 0; + sc->ptp_epoch_tsf = 0; + sc->ptp_stall_last_rx_ns = 0; + sc->ptp_stall_last_rx_delta_ns = 0; + sc->ptp_stall_rx_cnt = 0; + sc->ptp_stall_last_tx_ns = 0; + sc->ptp_stall_last_tx_delta_ns = 0; + sc->ptp_stall_tx_cnt = 0; + sc->ptp_tx_use_duration = 1; + sc->ptp_rx_use_duration = 0; + sc->ptp_rx_drop_cnt = 0; + sc->ptp_epoch_drop_cnt = 0; + sc->ptp_rx_hwts_cnt = 0; + sc->ptp_rx_parse_attempt_cnt = 0; + sc->ptp_rx_parse_miss_cnt = 0; + sc->ptp_rx_parse_last_reason = ATH9K_PTP_PARSE_OK; + sc->ptp_rx_parse_last_msgtype = 0xFF; + sc->ptp_rx_parse_last_seqid = 0xFFFF; + sc->ptp_rx_parse_last_skb_len = 0; + sc->ptp_rx_parse_last_hdrlen = 0; + sc->ptp_rx_parse_last_payload_len = 0; + sc->ptp_rx_parse_last_snap_off = 0; + sc->ptp_rx_parse_last_ptp_off = 0; + sc->ptp_rx_parse_last_ptp_len = 0; + sc->ptp_rx_parse_last_ethertype = 0; + sc->ptp_rx_parse_last_sport = 0; + sc->ptp_rx_parse_last_dport = 0; + sc->ptp_rx_parse_last_fc = 0; + sc->ptp_rx_parse_last_seq_ctrl = 0; + sc->ptp_rx_parse_last_ip_version = 0; + sc->ptp_rx_parse_last_ip_proto = 0; + sc->ptp_rx_parse_last_ihl = 0; + sc->ptp_rx_parse_last_bytes_len = 0; + memset(sc->ptp_rx_parse_last_bytes, 0, + 
sizeof(sc->ptp_rx_parse_last_bytes)); + sc->ptp_rx_ptp_seen = 0; + sc->ptp_rx_hwts_done = 0; + sc->ptp_rx_hwts_miss = 0; + sc->ptp_rx_wrap_last = 0; + sc->ptp_rx_wrap_ext = 0; + sc->ptp_rx_wrap_valid = 0; + sc->ptp_rx_wrap_epoch = 0; + sc->ptp_rx_wrap_last_prev = 0; + sc->ptp_rx_wrap_delta = 0; + sc->ptp_rx_wrap_delta_s = 0; + sc->ptp_rx_wrap_prev_ext = 0; + sc->ptp_tx_wrap_last = 0; + sc->ptp_tx_wrap_ext = 0; + sc->ptp_tx_wrap_valid = 0; + sc->ptp_tx_wrap_epoch = 0; + sc->ptp_tx_wrap_last_prev = 0; + sc->ptp_tx_wrap_delta = 0; + sc->ptp_tx_wrap_delta_s = 0; + sc->ptp_tx_wrap_prev_ext = 0; + sc->ptp_wrap_rebase_cnt = 0; + sc->ptp_raw_tsf_last = 0; + sc->ptp_raw_tsf_wraps = 0; + sc->ptp_raw_tsf_rebase_seen = 0; + sc->ptp_raw_tsf_valid = 0; + sc->ptp_rebase_old_offset_ns = 0; + sc->ptp_rebase_new_offset_ns = 0; + sc->ptp_rebase_anchor_rem_ns = 0; + sc->ptp_last_tx_hwtstamp_ns = 0; + sc->ptp_last_rx_msgtype = 0xFF; + sc->ptp_last_rx_seqid = 0xFFFF; + sc->ptp_last_tx_msgtype = 0xFF; + sc->ptp_last_tx_seqid = 0xFFFF; + sc->ptp_last_rx_tsf_offset = 0; + sc->ptp_last_rx_rebase = 0; + sc->ptp_last_tx_tsf_offset = 0; + sc->ptp_last_tx_rebase = 0; + sc->ptp_stall_enable = 0; + sc->ptp_stall_thresh_ns = 200000; + sc->ptp_tx_ptp_seen = 0; + sc->ptp_tx_tstamp_req = 0; + sc->ptp_tx_tstamp_done = 0; + sc->ptp_tx_tstamp_miss = 0; + sc->ptp_noack_ptp_event_enable = 1; + sc->ptp_tx_noack_event_cnt = 0; + sc->ptp_tx_noack_last_msgtype = 0xFF; + sc->ptp_tx_noack_last_seqid = 0xFFFF; + sc->ptp_tx_suspect_status_cnt = 0; + sc->ptp_tx_suspect_status_last_hwtstamp_ns = 0; + sc->ptp_tx_suspect_status_last_tstamp = 0; + sc->ptp_tx_suspect_status_last_msgtype = 0xFF; + sc->ptp_tx_suspect_status_last_seqid = 0xFFFF; + sc->ptp_tx_suspect_status_last_rateindex = 0; + sc->ptp_tx_suspect_status_last_shortretry = 0; + sc->ptp_tx_suspect_status_last_longretry = 0; + sc->ptp_tx_suspect_status_last_status = 0; + sc->ptp_tx_suspect_status_last_flags = 0; + for (i = 0; i < ATH9K_PTP_MSGTYPE_MAX; i++) { 
+ sc->ptp_rx_type_seen[i] = 0; + sc->ptp_rx_type_done[i] = 0; + sc->ptp_rx_type_miss[i] = 0; + sc->ptp_tx_type_seen[i] = 0; + sc->ptp_tx_type_req[i] = 0; + sc->ptp_tx_type_done[i] = 0; + sc->ptp_tx_type_miss[i] = 0; + } + atomic_set(&sc->ptp_evtlog_seq, 0); + sc->ptp_evtlog_enable = 0; + sc->ptp_evtlog_ptp_only = 0; + sc->ptp_evtlog_phc = 0; + atomic_set(&sc->ptp_tc_trace_seq, 0); + sc->ptp_tc_trace_enable = 0; + if (sc->ptp_tc_trace && sc->ptp_tc_trace_len) + memset(sc->ptp_tc_trace, 0, + sc->ptp_tc_trace_len * sizeof(*sc->ptp_tc_trace)); + atomic_set(&sc->ptp_handoff_seq, 0); + if (sc->ptp_handoff && sc->ptp_handoff_len) + memset(sc->ptp_handoff, 0, + sc->ptp_handoff_len * sizeof(*sc->ptp_handoff)); + sc->ptp_stack_enable = 0; + sc->ptp_last_stack_len = 0; + sc->ptp_last_stack_event = 0; + sc->ptp_last_stack_arg = 0; + sc->ptp_last_stack_ns = 0; + sc->ptp_last_stack_pid = 0; + sc->ptp_last_stack_comm[0] = '\0'; + sc->ptp_cc_last = 0; + sc->ptp_cc_last_raw = 0; + sc->ptp_cc_last_hi1 = 0; + sc->ptp_cc_last_lo = 0; + sc->ptp_cc_last_hi2 = 0; + sc->ptp_cc_glitch_cnt = 0; + sc->ptp_cc_glitch_ts_ns = 0; + sc->ptp_cc_glitch_last = 0; + sc->ptp_cc_glitch_new = 0; + sc->ptp_cc_glitch_delta = 0; + sc->ptp_cc_glitch_raw = 0; + sc->ptp_cc_glitch_offset = 0; + sc->ptp_cc_glitch_hi1 = 0; + sc->ptp_cc_glitch_lo = 0; + sc->ptp_cc_glitch_hi2 = 0; + sc->ptp_cc_glitch_pid = 0; + sc->ptp_cc_glitch_comm[0] = '\0'; + sc->ptp_cc_glitch_valid = 0; + sc->ptp_tc_last_host_ns = 0; + sc->ptp_tc_last_ns = 0; + sc->ptp_tc_last_raw = 0; + sc->ptp_tc_anomaly_thresh_ns = NSEC_PER_MSEC; + sc->ptp_tc_anomaly_cnt = 0; + sc->ptp_tc_anomaly_ts_ns = 0; + sc->ptp_tc_anomaly_host_ns = 0; + sc->ptp_tc_anomaly_host_last_ns = 0; + sc->ptp_tc_anomaly_ns = 0; + sc->ptp_tc_anomaly_ns_last = 0; + sc->ptp_tc_anomaly_cycle_now = 0; + sc->ptp_tc_anomaly_cycle_last = 0; + sc->ptp_tc_anomaly_raw_now = 0; + sc->ptp_tc_anomaly_raw_last = 0; + sc->ptp_tc_anomaly_adj_seq = 0; + sc->ptp_tc_anomaly_host_delta_ns = 0; + 
sc->ptp_tc_anomaly_ns_delta_ns = 0; + sc->ptp_tc_anomaly_cycle_delta = 0; + sc->ptp_tc_anomaly_raw_delta = 0; + sc->ptp_tc_anomaly_offset_ns = 0; + sc->ptp_tc_anomaly_mismatch_ns = 0; + sc->ptp_tc_anomaly_ns_offset = 0; + sc->ptp_tc_anomaly_cc_mult = 0; + sc->ptp_tc_anomaly_cc_mask = 0; + sc->ptp_tc_anomaly_cc_shift = 0; + sc->ptp_tc_anomaly_source = ATH9K_PTP_TC_ANOM_SRC_NONE; + sc->ptp_tc_anomaly_reason = ATH9K_PTP_TC_TRACE_RSN_NONE; + sc->ptp_tc_anomaly_valid = 0; + sc->ptp_hwt_last_ns = 0; + sc->ptp_hwt_last_tc_nsec = 0; + sc->ptp_hwt_anomaly_cnt = 0; + sc->ptp_hwt_anomaly_ts_ns = 0; + sc->ptp_hwt_anomaly_prev_ns = 0; + sc->ptp_hwt_anomaly_ns = 0; + sc->ptp_hwt_anomaly_prev_tc_nsec = 0; + sc->ptp_hwt_anomaly_tc_nsec = 0; + sc->ptp_hwt_anomaly_cycle = 0; + sc->ptp_hwt_anomaly_cycle_adj = 0; + sc->ptp_hwt_anomaly_tc_cycle_last = 0; + sc->ptp_hwt_anomaly_tsf64 = 0; + sc->ptp_hwt_anomaly_adj_seq = 0; + sc->ptp_hwt_anomaly_rebase_cnt = 0; + sc->ptp_hwt_anomaly_offset_ns = 0; + sc->ptp_hwt_anomaly_offset_rem_ns = 0; + sc->ptp_hwt_anomaly_epoch = 0; + sc->ptp_hwt_anomaly_reason = ATH9K_PTP_TC_TRACE_RSN_NONE; + sc->ptp_hwt_anomaly_valid = 0; + sc->ptp_tc_mutation_cnt = 0; + sc->ptp_tc_mutation_ts_ns = 0; + sc->ptp_tc_mutation_target_ns = 0; + sc->ptp_tc_mutation_delta_ns = 0; + sc->ptp_tc_mutation_pre_tc_nsec = 0; + sc->ptp_tc_mutation_post_tc_nsec = 0; + sc->ptp_tc_mutation_pre_cycle_last = 0; + sc->ptp_tc_mutation_post_cycle_last = 0; + sc->ptp_tc_mutation_pre_frac = 0; + sc->ptp_tc_mutation_post_frac = 0; + sc->ptp_tc_mutation_adj_seq = 0; + sc->ptp_tc_mutation_cc_mult = 0; + sc->ptp_tc_mutation_cc_shift = 0; + sc->ptp_tc_mutation_shift_arg = 0; + sc->ptp_tc_mutation_source = ATH9K_PTP_TC_MUT_NONE; + sc->ptp_tc_mutation_valid = 0; + atomic64_set(&sc->sc_ah->ptp_tsf_offset, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_offset_ns_rem, 0); + atomic64_set(&sc->sc_ah->ptp_settsf_dur_ns, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_get64_cnt, 0); + 
atomic64_set(&sc->sc_ah->ptp_tsf_get32_cnt, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_set_cnt, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_reset_cnt, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_cc_read_cnt, 0); + atomic64_set(&sc->sc_ah->ptp_tsf_aic_read_cnt, 0); + sc->ptp_settsf_enable = 1; + sc->ptp_settsf_delta_thresh_us = 20; + sc->ptp_settsf_delta_skip_cnt = 0; + WRITE_ONCE(sc->sc_ah->ptp_rebase_anchor_valid, false); + WRITE_ONCE(sc->sc_ah->ptp_rebase_phc, 0); + WRITE_ONCE(sc->sc_ah->ptp_rebase_phc_rem_ns, 0); + sc->sc_ah->ptp_wrap_rebase = ath9k_ptp_wrap_rebase_cb; + sc->sc_ah->ptp_settsf_log = ath9k_ptp_settsf_log_cb; + sc->sc_ah->ptp_resettsf_log = ath9k_ptp_resettsf_log_cb; - snprintf(sc->ptp_clock_info.name, - sizeof(sc->ptp_clock_info.name), "%pm", - sc->hw->wiphy->perm_addr); + sc->ptp_clock_info = ath9k_ptp_clock_info; - sc->ptp_clock_info.max_adj = 1e6; + snprintf(sc->ptp_clock_info.name, + sizeof(sc->ptp_clock_info.name), "%pm", + sc->hw->wiphy->perm_addr); - sc->ptp_clock = ptp_clock_register(&sc->ptp_clock_info, sc->dev); + sc->ptp_clock_info.max_adj = 1e6; - if (IS_ERR(sc->ptp_clock)) { - sc->ptp_clock = NULL; - ath_err(ath9k_hw_common(sc->sc_ah), "ptp_clock_register failed\n"); - } else if (sc->ptp_clock) { - ath_info(ath9k_hw_common(sc->sc_ah), "registered PHC clock\n"); - } + sc->ptp_clock = ptp_clock_register(&sc->ptp_clock_info, sc->dev); + + if (IS_ERR(sc->ptp_clock)) { + sc->ptp_clock = NULL; + ath_err(ath9k_hw_common(sc->sc_ah), + "ptp_clock_register failed\n"); + } else if (sc->ptp_clock) { + ath_info(ath9k_hw_common(sc->sc_ah), "registered PHC clock\n"); + } } +void ath9k_ptp_mode_reset(struct ath_softc *sc) +{ + if (!sc) + return; + + ath9k_ptp_state_reset_now(sc, ath9k_hw_gettsf64(sc->sc_ah)); +} + +bool ath9k_ptp_infra_reanchor(struct ath_softc *sc, u64 tsf, u32 read_dur_ns) +{ + unsigned long flags; + s64 old_offset_us; + s64 old_offset_rem; + s64 old_offset_ns; + s64 phc_ns; + s64 new_offset_ns; + bool allow; + + if (!sc) + return false; + + 
spin_lock_irqsave(&sc->systim_lock, flags); + old_offset_us = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + old_offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + old_offset_ns = old_offset_us * (s64)NSEC_PER_USEC + old_offset_rem; + phc_ns = (s64)ath9k_ptp_tc_read(sc, ATH9K_PTP_TC_TRACE_RSN_NONE); + new_offset_ns = phc_ns - ((s64)tsf * NSEC_PER_USEC + read_dur_ns); + allow = ath9k_ptp_infra_reanchor_guard(sc, old_offset_ns, new_offset_ns); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (!allow) + return false; + + ath9k_ptp_reanchor_now(sc, tsf, read_dur_ns, true); + return true; +} + +void ath9k_ptp_sta_hidden_step_check(struct ath_softc *sc, u64 tsf, u64 host_ns) +{ + unsigned long flags; + s64 delta_host_ns; + s64 delta_tsf_ns; + s64 corr_ns; + s64 old_offset_us; + s64 old_offset_rem; + s64 old_offset_ns; + s64 new_offset_ns; + s64 new_offset_us; + s64 new_offset_rem; + u64 last_host_ns; + u64 last_tsf; + u64 drop_until_ns = 0; + u64 abs_corr_ns; + u32 drop_ns = 0; + u32 thresh_ns; + s32 rem_ns; + + if (!sc) + return; + + thresh_ns = READ_ONCE(sc->ptp_sta_hidden_step_thresh_ns); + if (!thresh_ns) + return; + + spin_lock_irqsave(&sc->systim_lock, flags); + last_host_ns = sc->ptp_sta_hidden_step_last_host_ns; + last_tsf = sc->ptp_sta_hidden_step_last_tsf; + sc->ptp_sta_hidden_step_last_host_ns = host_ns; + sc->ptp_sta_hidden_step_last_tsf = tsf; + + if (!last_host_ns || !last_tsf || host_ns <= last_host_ns) { + sc->ptp_sta_hidden_step_last_delta_host_ns = 0; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = 0; + sc->ptp_sta_hidden_step_last_corr_ns = 0; + sc->ptp_sta_hidden_step_last_abs_ns = 0; + spin_unlock_irqrestore(&sc->systim_lock, flags); + return; + } + + delta_host_ns = (s64)(host_ns - last_host_ns); + delta_tsf_ns = ((s64)tsf - (s64)last_tsf) * NSEC_PER_USEC; + corr_ns = delta_host_ns - delta_tsf_ns; + abs_corr_ns = (corr_ns < 0) ? 
(u64)(-corr_ns) : (u64)corr_ns; + sc->ptp_sta_hidden_step_last_delta_host_ns = delta_host_ns; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = delta_tsf_ns; + sc->ptp_sta_hidden_step_last_corr_ns = corr_ns; + sc->ptp_sta_hidden_step_last_abs_ns = abs_corr_ns; + + if (tsf <= last_tsf) { + sc->ptp_sta_hidden_step_backwards_cnt++; + sc->ptp_sta_hidden_step_backwards_delta_host_ns = delta_host_ns; + sc->ptp_sta_hidden_step_backwards_delta_tsf_ns = delta_tsf_ns; + spin_unlock_irqrestore(&sc->systim_lock, flags); + return; + } + + if (abs_corr_ns <= (u64)thresh_ns) { + spin_unlock_irqrestore(&sc->systim_lock, flags); + return; + } + + old_offset_us = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + old_offset_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + old_offset_ns = old_offset_us * (s64)NSEC_PER_USEC + old_offset_rem; + new_offset_ns = old_offset_ns + corr_ns; + new_offset_us = div_s64_rem(new_offset_ns, NSEC_PER_USEC, &rem_ns); + new_offset_rem = rem_ns; + + drop_ns = READ_ONCE(sc->ptp_rebase_drop_ns); + if (drop_ns) + drop_until_ns = ktime_get_ns() + drop_ns; + + atomic64_set(&sc->sc_ah->ptp_tsf_offset, new_offset_us); + atomic64_set(&sc->sc_ah->ptp_tsf_offset_ns_rem, new_offset_rem); + sc->ptp_rx_wrap_last = (u32)tsf; + sc->ptp_tx_wrap_last = (u32)tsf; + sc->ptp_rx_wrap_ext = tsf; + sc->ptp_tx_wrap_ext = tsf; + sc->ptp_rx_wrap_valid = 1; + sc->ptp_tx_wrap_valid = 1; + write_seqcount_begin(&sc->ptp_epoch_seq); + sc->ptp_epoch++; + sc->ptp_epoch_offset_us = new_offset_us; + sc->ptp_epoch_offset_rem = new_offset_rem; + sc->ptp_epoch_tsf = tsf; + write_seqcount_end(&sc->ptp_epoch_seq); + sc->ptp_rx_wrap_epoch = sc->ptp_epoch; + sc->ptp_tx_wrap_epoch = sc->ptp_epoch; + sc->ptp_wrap_rebase_cnt++; + sc->ptp_wrap_rebase_tsf = tsf; + sc->ptp_rebase_drop_until_ns = drop_until_ns; + sc->ptp_rebase_old_offset_ns = old_offset_ns; + sc->ptp_rebase_new_offset_ns = new_offset_ns; + sc->ptp_rebase_anchor_rem_ns = 0; + sc->ptp_guard_inflight = 0; + sc->ptp_guard_until_ns = 0; 
+ sc->ptp_sta_hidden_step_cnt++; + ath9k_ptp_reset_runtime_samples(sc); + sc->ptp_sta_hidden_step_last_host_ns = host_ns; + sc->ptp_sta_hidden_step_last_tsf = tsf; + sc->ptp_sta_hidden_step_last_delta_host_ns = delta_host_ns; + sc->ptp_sta_hidden_step_last_delta_tsf_ns = delta_tsf_ns; + sc->ptp_sta_hidden_step_last_corr_ns = corr_ns; + sc->ptp_sta_hidden_step_last_abs_ns = abs_corr_ns; + sc->ptp_sta_hidden_step_last_reanchor_ns = host_ns; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_REBASE, + tsf, 0, 0, tsf, 0, NULL, NULL); +} + +/* Rebase wrap extenders and TSF offset after TSF change. */ +void ath9k_ptp_wrap_rebase(struct ath_softc *sc, u64 tsf) +{ + unsigned long flags; + u64 rx_prev; + u64 tx_prev; + u64 base_epoch; + u64 new_ext; + u64 drop_until_ns = 0; + s64 offset_us; + s64 offset_rem; + s64 offset_ns; + s64 old_offset_us; + s64 old_rem; + s64 old_offset_ns; + bool anchor_valid; + u64 anchor_phc; + s64 anchor_rem; + u64 log_tsf = tsf; + u64 log_ext; + u32 drop_ns; + s64 dur_ns; + s32 rem_ns; + u32 dur_u32; + bool rx_ok = false; + bool tx_ok = false; + + spin_lock_irqsave(&sc->systim_lock, flags); + anchor_valid = READ_ONCE(sc->sc_ah->ptp_rebase_anchor_valid); + anchor_phc = READ_ONCE(sc->sc_ah->ptp_rebase_phc); + anchor_rem = READ_ONCE(sc->sc_ah->ptp_rebase_phc_rem_ns); + old_offset_us = atomic64_read(&sc->sc_ah->ptp_tsf_offset); + old_rem = atomic64_read(&sc->sc_ah->ptp_tsf_offset_ns_rem); + dur_ns = atomic64_xchg(&sc->sc_ah->ptp_settsf_dur_ns, 0); + old_offset_ns = old_offset_us * (s64)NSEC_PER_USEC + old_rem; + if (anchor_valid) { + offset_ns = (s64)anchor_phc * NSEC_PER_USEC + + anchor_rem + dur_ns - + (s64)tsf * NSEC_PER_USEC; + } else { + rx_prev = sc->ptp_rx_wrap_ext; + tx_prev = sc->ptp_tx_wrap_ext; + + if (sc->ptp_rx_wrap_valid) { + rx_ok = ath9k_ptp_rebase_candidate_valid(rx_prev, tsf); + if (!rx_ok) + sc->ptp_rebase_rx_reject_cnt++; + } + + if 
(sc->ptp_tx_wrap_valid) { + tx_ok = ath9k_ptp_rebase_candidate_valid(tx_prev, tsf); + if (!tx_ok) + sc->ptp_rebase_tx_reject_cnt++; + } + + if (rx_ok && tx_ok) { + base_epoch = max_t(u64, rx_prev, tx_prev); + } else if (rx_ok) { + base_epoch = rx_prev; + } else if (tx_ok) { + base_epoch = tx_prev; + } else { + base_epoch = tsf; + if (sc->ptp_rx_wrap_valid || sc->ptp_tx_wrap_valid) + sc->ptp_rebase_fallback_cnt++; + } + + offset_ns = ((s64)base_epoch + old_offset_us - (s64)tsf) * + NSEC_PER_USEC + old_rem; + } + + drop_ns = READ_ONCE(sc->ptp_rebase_drop_ns); + if (drop_ns) + drop_until_ns = ktime_get_ns() + drop_ns; + + offset_us = div_s64_rem(offset_ns, NSEC_PER_USEC, &rem_ns); + offset_rem = rem_ns; + + new_ext = tsf; + atomic64_set(&sc->sc_ah->ptp_tsf_offset, offset_us); + atomic64_set(&sc->sc_ah->ptp_tsf_offset_ns_rem, offset_rem); + sc->ptp_rx_wrap_last = (u32)tsf; + sc->ptp_tx_wrap_last = (u32)tsf; + sc->ptp_rx_wrap_ext = new_ext; + sc->ptp_tx_wrap_ext = new_ext; + sc->ptp_rx_wrap_valid = 1; + sc->ptp_tx_wrap_valid = 1; + write_seqcount_begin(&sc->ptp_epoch_seq); + sc->ptp_epoch++; + sc->ptp_epoch_offset_us = offset_us; + sc->ptp_epoch_offset_rem = offset_rem; + sc->ptp_epoch_tsf = tsf; + write_seqcount_end(&sc->ptp_epoch_seq); + sc->ptp_rx_wrap_epoch = sc->ptp_epoch; + sc->ptp_tx_wrap_epoch = sc->ptp_epoch; + sc->ptp_wrap_rebase_cnt++; + sc->ptp_wrap_rebase_tsf = tsf; + sc->ptp_rebase_drop_until_ns = drop_until_ns; + sc->ptp_rebase_old_offset_ns = old_offset_ns; + sc->ptp_rebase_new_offset_ns = offset_ns; + sc->ptp_rebase_anchor_rem_ns = anchor_rem; + if (anchor_valid) + WRITE_ONCE(sc->sc_ah->ptp_rebase_anchor_valid, false); + log_ext = new_ext; + spin_unlock_irqrestore(&sc->systim_lock, flags); + + if (dur_ns <= 0) + dur_u32 = 0; + else if (dur_ns > (s64)U32_MAX) + dur_u32 = U32_MAX; + else + dur_u32 = (u32)dur_ns; + + if (READ_ONCE(sc->ptp_evtlog_enable)) + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_REBASE, + log_tsf, 0, dur_u32, log_ext, 0, NULL, NULL); +} + 
+/* Arm guard while a Delay_Req is in flight. */ +void ath9k_ptp_guard_delay_req(struct ath_softc *sc, u16 seq_id) +{ + u64 now_ns; + u64 guard_ns; + u32 timeout_ms; + + if (!sc) + return; + + timeout_ms = READ_ONCE(sc->ptp_guard_timeout_ms); + if (!timeout_ms) + return; + + now_ns = ktime_get_ns(); + guard_ns = (u64)timeout_ms * NSEC_PER_MSEC; + WRITE_ONCE(sc->ptp_guard_seqid, seq_id); + WRITE_ONCE(sc->ptp_guard_inflight, 1); + WRITE_ONCE(sc->ptp_guard_until_ns, now_ns + guard_ns); +} + +/* Clear guard once matching Delay_Resp is observed on RX or TX. */ +void ath9k_ptp_guard_delay_resp(struct ath_softc *sc, u16 seq_id) +{ + if (!sc) + return; + if (!READ_ONCE(sc->ptp_guard_inflight)) + return; + if (READ_ONCE(sc->ptp_guard_seqid) != seq_id) + return; + WRITE_ONCE(sc->ptp_guard_inflight, 0); + WRITE_ONCE(sc->ptp_guard_until_ns, 0); +} + +/* Arm guard after any PTP event (independent of seqid). */ +void ath9k_ptp_guard_event(struct ath_softc *sc) +{ + u64 now_ns; + u64 guard_ns; + u64 until_ns; + u32 after_ms; + + if (!sc) + return; + + after_ms = READ_ONCE(sc->ptp_guard_after_event_ms); + if (!after_ms) + return; + + now_ns = ktime_get_ns(); + guard_ns = (u64)after_ms * NSEC_PER_MSEC; + until_ns = now_ns + guard_ns; + + if (until_ns > READ_ONCE(sc->ptp_guard_until_ns)) + WRITE_ONCE(sc->ptp_guard_until_ns, until_ns); +} + +/* Unregister PHC and clear PTP callbacks. 
*/ void ath9k_ptp_remove(struct ath_softc *sc) { - if (sc->ptp_clock) { - ptp_clock_unregister(sc->ptp_clock); - sc->ptp_clock = NULL; - ath_info(ath9k_hw_common(sc->sc_ah), "removed PHC clock\n"); - } -} \ No newline at end of file + if (sc->ptp_clock) { + ptp_clock_unregister(sc->ptp_clock); + sc->ptp_clock = NULL; + ath_info(ath9k_hw_common(sc->sc_ah), "removed PHC clock\n"); + } + sc->sc_ah->ptp_wrap_rebase = NULL; + sc->sc_ah->ptp_settsf_log = NULL; + sc->sc_ah->ptp_resettsf_log = NULL; +} diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 7546dbea258f..fec9b473c5d6 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -14,15 +14,14 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -//focus: wiptp #include +#include #include #include #include "ath9k.h" #include "ar9003_mac.h" -// focus: wiptp #include "../ath.h" #define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb)) @@ -479,10 +478,9 @@ void ath_startrecv(struct ath_softc *sc) static void ath_flushrecv(struct ath_softc *sc) { - // focus: wiptp if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) - ath_rx_tasklet(sc, 1, true, NULL); // FIXME: wiptp (NULL) - ath_rx_tasklet(sc, 1, false, NULL); // FIXME: wiptp (NULL) + ath_rx_tasklet(sc, 1, true, NULL); + ath_rx_tasklet(sc, 1, false, NULL); } bool ath_stoprecv(struct ath_softc *sc) @@ -542,10 +540,72 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) return false; } +static void ath9k_ptp_rx_parse_diag_update(struct ath_softc *sc, + const struct sk_buff *skb, + bool is_ptp, u8 msg_type, + u16 seq_id, + const struct ath9k_ptp_parse_info *info) +{ + struct ieee80211_hdr *hdr; + u16 copy_len; + + if (!sc || !info) + return; + + WRITE_ONCE(sc->ptp_rx_parse_attempt_cnt, + READ_ONCE(sc->ptp_rx_parse_attempt_cnt) + 1); + if (!is_ptp) + WRITE_ONCE(sc->ptp_rx_parse_miss_cnt, + READ_ONCE(sc->ptp_rx_parse_miss_cnt) + 1); + + 
WRITE_ONCE(sc->ptp_rx_parse_last_reason, info->reason); + WRITE_ONCE(sc->ptp_rx_parse_last_msgtype, msg_type); + WRITE_ONCE(sc->ptp_rx_parse_last_seqid, seq_id); + WRITE_ONCE(sc->ptp_rx_parse_last_hdrlen, info->hdrlen); + WRITE_ONCE(sc->ptp_rx_parse_last_payload_len, info->payload_len); + WRITE_ONCE(sc->ptp_rx_parse_last_snap_off, info->snap_off); + WRITE_ONCE(sc->ptp_rx_parse_last_ptp_off, info->ptp_off); + WRITE_ONCE(sc->ptp_rx_parse_last_ptp_len, info->ptp_len); + WRITE_ONCE(sc->ptp_rx_parse_last_ethertype, info->ethertype); + WRITE_ONCE(sc->ptp_rx_parse_last_sport, info->sport); + WRITE_ONCE(sc->ptp_rx_parse_last_dport, info->dport); + WRITE_ONCE(sc->ptp_rx_parse_last_ip_version, info->ip_version); + WRITE_ONCE(sc->ptp_rx_parse_last_ip_proto, info->ip_proto); + WRITE_ONCE(sc->ptp_rx_parse_last_ihl, info->ihl); + + if (!skb) { + WRITE_ONCE(sc->ptp_rx_parse_last_skb_len, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_bytes_len, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_fc, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_seq_ctrl, 0); + return; + } + + WRITE_ONCE(sc->ptp_rx_parse_last_skb_len, skb->len); + copy_len = min_t(u16, skb->len, (u16)ATH9K_PTP_RX_PARSE_BYTES); + WRITE_ONCE(sc->ptp_rx_parse_last_bytes_len, copy_len); + if (copy_len) + skb_copy_bits(skb, 0, sc->ptp_rx_parse_last_bytes, copy_len); + + if (skb->len < sizeof(*hdr)) { + WRITE_ONCE(sc->ptp_rx_parse_last_fc, 0); + WRITE_ONCE(sc->ptp_rx_parse_last_seq_ctrl, 0); + return; + } + + hdr = (struct ieee80211_hdr *)skb->data; + WRITE_ONCE(sc->ptp_rx_parse_last_fc, le16_to_cpu(hdr->frame_control)); + WRITE_ONCE(sc->ptp_rx_parse_last_seq_ctrl, le16_to_cpu(hdr->seq_ctrl)); +} + static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); bool skip_beacon = false; + u64 read_start_ns; + u64 read_end_ns; + u64 read_dur_ns; + u64 tsf; if (skb->len < 24 + 8 + 2 + 2) return; @@ -556,6 +616,22 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff 
*skb) sc->ps_flags &= ~PS_BEACON_SYNC; ath_dbg(common, PS, "Reconfigure beacon timers based on synchronized timestamp\n"); + read_start_ns = ktime_get_ns(); + tsf = ath9k_hw_gettsf64(sc->sc_ah); + read_end_ns = ktime_get_ns(); + read_dur_ns = read_end_ns - read_start_ns; + if (read_dur_ns > U32_MAX) + read_dur_ns = U32_MAX; + /* + * IBSS joiners also use PS_BEACON_SYNC to arm beacon timers + * after the explicit reset_tsf()/wrap_rebase path. Keep the + * infrastructure reanchor restricted to managed STA mode so we + * do not clobber the IBSS TSF-step state on the first beacon. + */ + if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION && + READ_ONCE(sc->ptp_infra_reanchor_enable) && + !ath9k_ptp_infra_reanchor(sc, tsf, (u32)read_dur_ns)) + sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT if (ath9k_is_chanctx_enabled()) { @@ -792,20 +868,144 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, return bf; } -static void ath9k_process_tsf(struct ath_rx_status *rs, +static u64 ath9k_ptp_extend_rx(struct ath_softc *sc, u32 tstamp, u64 tsf, + u32 epoch); +static u64 ath9k_extend_tsf(u64 tsf, u32 tstamp); + +static void ath9k_process_tsf(struct ath_softc *sc, + struct ath_rx_status *rs, struct ieee80211_rx_status *rxs, - u64 tsf) + u64 tsf, u32 epoch) +{ + /* Stateless extend for general mactime; PTP wrap state is updated later. 
*/ + rxs->mactime = ath9k_extend_tsf(tsf, rs->rs_tstamp); +} + +static u64 ath9k_extend_tsf(u64 tsf, u32 tstamp) { u32 tsf_lower = tsf & 0xffffffff; + u64 full = (tsf & ~0xffffffffULL) | tstamp; + + if (tstamp > tsf_lower && + unlikely(tstamp - tsf_lower > 0x10000000)) + full -= 0x100000000ULL; + + if (tstamp < tsf_lower && + unlikely(tsf_lower - tstamp > 0x10000000)) + full += 0x100000000ULL; + + return full; +} + +#define ATH9K_PTP_WRAP_RESYNC_THRESH_US 0x80000000ULL + +static bool ath9k_ptp_sync_rx_duplicate(struct ath_softc *sc, + const struct ieee80211_hdr *hdr, + u8 msg_type, u16 seq_id, u64 rx_ext) +{ + unsigned long flags; + u32 window_us; + u64 last_ext; + u64 delta_us = 0; + bool drop = false; + + if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION || + msg_type != ATH9K_PTP_MSGTYPE_SYNC) + return false; + + window_us = READ_ONCE(sc->ptp_sync_dup_window_us); + if (!window_us) + return false; + + spin_lock_irqsave(&sc->systim_lock, flags); + if (sc->ptp_sync_dup_valid && + sc->ptp_sync_dup_last_seqid == seq_id && + ether_addr_equal(sc->ptp_sync_dup_last_src, hdr->addr2)) { + last_ext = sc->ptp_sync_dup_last_ext; + if (rx_ext >= last_ext) { + delta_us = rx_ext - last_ext; + if (delta_us <= window_us) { + sc->ptp_sync_dup_drop_cnt++; + sc->ptp_sync_dup_last_delta_us = (u32)delta_us; + drop = true; + } + } + } + + sc->ptp_sync_dup_valid = 1; + sc->ptp_sync_dup_last_ext = rx_ext; + sc->ptp_sync_dup_last_seqid = seq_id; + ether_addr_copy(sc->ptp_sync_dup_last_src, hdr->addr2); + spin_unlock_irqrestore(&sc->systim_lock, flags); + + return drop; +} - rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; - if (rs->rs_tstamp > tsf_lower && - unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) - rxs->mactime -= 0x100000000ULL; +/* Extend 32-bit RX TSF using wrap and glitch guards. 
*/ +static u64 ath9k_ptp_extend_rx(struct ath_softc *sc, u32 tstamp, u64 tsf, + u32 epoch) +{ + unsigned long flags; + u64 ext; + u64 stateless; + u64 diff; + u32 delta; + u32 last; + u64 prev_ext; + + spin_lock_irqsave(&sc->systim_lock, flags); + stateless = ath9k_extend_tsf(tsf, tstamp); + if (!sc->ptp_rx_wrap_valid || + sc->ptp_rx_wrap_epoch != epoch) { + sc->ptp_rx_wrap_valid = 1; + sc->ptp_rx_wrap_epoch = epoch; + sc->ptp_rx_wrap_last = tstamp; + ext = stateless; + sc->ptp_rx_wrap_ext = ext; + sc->ptp_rx_wrap_last_prev = tstamp; + sc->ptp_rx_wrap_delta = 0; + sc->ptp_rx_wrap_delta_s = 0; + sc->ptp_rx_wrap_prev_ext = ext; + } else { + last = sc->ptp_rx_wrap_last; + prev_ext = sc->ptp_rx_wrap_ext; + if (READ_ONCE(sc->ptp_wrap_glitch_thresh) && + tstamp < last) { + u32 back = last - tstamp; + if (back < READ_ONCE(sc->ptp_wrap_glitch_thresh)) { + sc->ptp_rx_wrap_last_prev = last; + sc->ptp_rx_wrap_delta = 0; + sc->ptp_rx_wrap_delta_s = -(s32)back; + sc->ptp_rx_wrap_prev_ext = prev_ext; + ext = prev_ext; + goto check_resync; + } + } + delta = tstamp - last; + sc->ptp_rx_wrap_last_prev = last; + sc->ptp_rx_wrap_delta = delta; + sc->ptp_rx_wrap_delta_s = (s32)delta; + sc->ptp_rx_wrap_prev_ext = prev_ext; + sc->ptp_rx_wrap_last = tstamp; + sc->ptp_rx_wrap_ext = prev_ext + delta; + ext = sc->ptp_rx_wrap_ext; + +check_resync: + diff = (ext > stateless) ? 
(ext - stateless) : + (stateless - ext); + if (unlikely(diff > ATH9K_PTP_WRAP_RESYNC_THRESH_US)) { + sc->ptp_rx_wrap_epoch = epoch; + sc->ptp_rx_wrap_last = tstamp; + sc->ptp_rx_wrap_ext = stateless; + sc->ptp_rx_wrap_delta = 0; + sc->ptp_rx_wrap_delta_s = 0; + ext = stateless; + } + } +out: + spin_unlock_irqrestore(&sc->systim_lock, flags); - if (rs->rs_tstamp < tsf_lower && - unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) - rxs->mactime += 0x100000000ULL; + return ext; } /* @@ -817,7 +1017,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct sk_buff *skb, struct ath_rx_status *rx_stats, struct ieee80211_rx_status *rx_status, - bool *decrypt_error, u64 tsf) + bool *decrypt_error, u64 tsf, u32 epoch) { struct ieee80211_hw *hw = sc->hw; struct ath_hw *ah = sc->sc_ah; @@ -872,7 +1072,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); - ath9k_process_tsf(rx_stats, rx_status, tsf); + ath9k_process_tsf(sc, rx_stats, rx_status, tsf, epoch); ath_debug_stat_rx(sc, rx_stats); /* @@ -1015,20 +1215,40 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc, } } +static u32 ath9k_rx_frame_duration(struct ath_softc *sc, + struct ath_rx_status *rs, + struct ieee80211_rx_status *rxs, + u16 len) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + const struct ieee80211_rate *rate; + bool is_sgi, is_40, is_sp; + int phy; + + is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI); + is_40 = !!(rxs->bw == RATE_INFO_BW_40); + is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE); + + if (rxs->encoding == RX_ENC_HT) + return ath_pkt_duration(sc, rxs->rate_idx, len, + is_40, is_sgi, is_sp); + + phy = IS_CCK_RATE(rs->rs_rate) ? 
WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM; + rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx]; + + return ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100, + len, rxs->rate_idx, is_sp); +} + static void ath_rx_count_airtime(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_sta *sta; struct ieee80211_rx_status *rxs; - const struct ieee80211_rate *rate; - bool is_sgi, is_40, is_sp; - int phy; - u16 len = rs->rs_datalen; - u32 airtime = 0; + u32 airtime; u8 tidno; if (!ieee80211_is_data(hdr->frame_control)) @@ -1042,30 +1262,13 @@ static void ath_rx_count_airtime(struct ath_softc *sc, tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; rxs = IEEE80211_SKB_RXCB(skb); - - is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI); - is_40 = !!(rxs->bw == RATE_INFO_BW_40); - is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE); - - if (!!(rxs->encoding == RX_ENC_HT)) { - /* MCS rates */ - - airtime += ath_pkt_duration(sc, rxs->rate_idx, len, - is_40, is_sgi, is_sp); - } else { - - phy = IS_CCK_RATE(rs->rs_rate) ? 
WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM; - rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx]; - airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100, - len, rxs->rate_idx, is_sp); - } + airtime = ath9k_rx_frame_duration(sc, rs, rxs, rs->rs_datalen); ieee80211_sta_register_airtime(sta, tidno, 0, airtime); exit: rcu_read_unlock(); } -// focus: wiptp int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // FIXME: tstamp unused { struct ath_rxbuf *bf; @@ -1080,6 +1283,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; u64 tsf = 0; + u32 read_dur_ns = 0; unsigned long flags; dma_addr_t new_buf_addr; unsigned int budget = 512; @@ -1093,8 +1297,22 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; tsf = ath9k_hw_gettsf64(ah); + ath9k_ptp_mon_update(&sc->ptp_mon.tsf64, tsf); do { + u64 tsf_snap = 0; + s64 snap_offset = 0; + s64 snap_offset_rem = 0; + u32 snap_epoch = 0; + bool is_ptp = false; + bool rebase_quarantine = ath9k_ptp_rebase_quarantine_active(sc); + bool need_ptp = READ_ONCE(sc->ptp_evtlog_ptp_only) || + READ_ONCE(sc->ptp_guard_timeout_ms) || + READ_ONCE(sc->ptp_guard_after_event_ms) || + rebase_quarantine; + struct ath9k_ptp_parse_info parse_info = { 0 }; + u8 msg_type = 0xFF; + u16 seq_id = 0xFFFF; bool decrypt_error = false; memset(&rs, 0, sizeof(rs)); @@ -1122,8 +1340,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); + read_dur_ns = 0; + ath9k_ptp_epoch_snapshot(sc, &tsf_snap, + &snap_offset, + &snap_offset_rem, + &snap_epoch, + NULL); retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs, - &decrypt_error, tsf); + &decrypt_error, tsf_snap, + snap_epoch); if (retval) goto requeue_drop_frag; @@ -1201,6 +1426,45 @@ 
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // if (rxs->flag & RX_FLAG_MMIC_STRIPPED) skb_trim(skb, skb->len - 8); + if (need_ptp) { + is_ptp = ath9k_ptp_parse_skb_diag(skb, &msg_type, + &seq_id, NULL, NULL, + &parse_info); + ath9k_ptp_rx_parse_diag_update(sc, skb, is_ptp, + msg_type, seq_id, + &parse_info); + if (is_ptp && rebase_quarantine) { + u64 now_ns = ktime_get_ns(); + + WRITE_ONCE(sc->ptp_rx_ptp_seen, + READ_ONCE(sc->ptp_rx_ptp_seen) + 1); + WRITE_ONCE(sc->ptp_rx_hwts_miss, + READ_ONCE(sc->ptp_rx_hwts_miss) + 1); + if (msg_type < ATH9K_PTP_MSGTYPE_MAX) { + WRITE_ONCE(sc->ptp_rx_type_seen[msg_type], + READ_ONCE(sc->ptp_rx_type_seen[msg_type]) + 1); + WRITE_ONCE(sc->ptp_rx_type_miss[msg_type], + READ_ONCE(sc->ptp_rx_type_miss[msg_type]) + 1); + } + WRITE_ONCE(sc->ptp_last_event_ns, now_ns); + WRITE_ONCE(sc->ptp_last_event_rx_ns, now_ns); + WRITE_ONCE(sc->ptp_last_rx_msgtype, msg_type); + WRITE_ONCE(sc->ptp_last_rx_seqid, seq_id); + WRITE_ONCE(sc->ptp_rx_drop_cnt, + READ_ONCE(sc->ptp_rx_drop_cnt) + 1); + WRITE_ONCE(sc->ptp_rebase_drop_cnt, + READ_ONCE(sc->ptp_rebase_drop_cnt) + 1); + dev_kfree_skb_any(skb); + goto requeue; + } + if (is_ptp) + ath9k_ptp_epoch_snapshot(sc, &tsf_snap, + &snap_offset, + &snap_offset_rem, + &snap_epoch, + &read_dur_ns); + } + spin_lock_irqsave(&sc->sc_pm_lock, flags); if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB | @@ -1213,27 +1477,161 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp, ktime_t *tstamp) // ath9k_apply_ampdu_details(sc, &rs, rxs); ath_debug_rate_stats(sc, &rs, skb); ath_rx_count_airtime(sc, &rs, skb); + if (rs.is_mybeacon && + sc->sc_ah->opmode == NL80211_IFTYPE_STATION && + READ_ONCE(sc->ptp_sta_hidden_step_thresh_ns)) { + u64 host_start_ns = ktime_get_raw_ns(); + u64 tsf_now = ath9k_hw_gettsf64(ah); + u64 host_end_ns = ktime_get_raw_ns(); + u64 host_mid_ns = host_start_ns + + ((host_end_ns - host_start_ns) >> 1); + + ath9k_ptp_sta_hidden_step_check(sc, 
tsf_now, host_mid_ns); + } hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ack(hdr->frame_control)) ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp); - // focus: wiptp - // ath9k_cyc2hwtstamp(sc, skb_hwtstamps(skb), rs.rs_tstamp); // TODO: trying this line - skb_hwtstamps(skb)->hwtstamp = (u64)(rs.rs_tstamp - 2); // TODO: repo uses this one - - // skb_hwtstamps(skb)->hwtstamp = ns_to_ktime((u64)(rs.rs_tstamp)); // TODO: repo uses this one - - // printk("ath9k: %s | rs.rs_tstamp: %lu | skb_hwtstamps(skb)->hwtstamp: %lld\n", __FUNCTION__, rs.rs_tstamp, ktime_to_ns(skb_hwtstamps(skb)->hwtstamp)); // tb350: debug - - // printk("ath9k: %s rx tstamp: %lld\n", __FUNCTION__, ktime_to_ns(skb_hwtstamps(skb)->hwtstamp)); // wiptp - - // ath_info( - // ath9k_hw_common(ah), "get ts after rx: %lld\n", - // ktime_to_ns(skb_hwtstamps(skb)->hwtstamp) - // ); + { + u64 rx_ext = rxs->mactime; + u64 rx_cycle; + u32 rx_duration; + bool epoch_ok; + u64 hwtstamp_ns; + u64 handoff_ns = 0; + epoch_ok = (snap_epoch == READ_ONCE(sc->ptp_epoch)); + if (is_ptp && epoch_ok) + rx_ext = ath9k_ptp_extend_rx(sc, rs.rs_tstamp, + tsf_snap, snap_epoch); + if (is_ptp && epoch_ok && + ath9k_ptp_sync_rx_duplicate(sc, hdr, msg_type, + seq_id, rx_ext)) { + dev_kfree_skb_any(skb); + goto requeue; + } + if (is_ptp) + ath9k_ptp_guard_event(sc); + if (is_ptp && msg_type == ATH9K_PTP_MSGTYPE_DELAY_REQ) + ath9k_ptp_guard_delay_req(sc, seq_id); + if (is_ptp && msg_type == ATH9K_PTP_MSGTYPE_DELAY_RESP) + ath9k_ptp_guard_delay_resp(sc, seq_id); + + rx_duration = ath9k_rx_frame_duration(sc, &rs, rxs, + rs.rs_datalen); + rx_cycle = rx_ext; + if (READ_ONCE(sc->ptp_rx_use_duration)) + rx_cycle += rx_duration; + ath9k_ptp_mon_update(&sc->ptp_mon.rx_ext, rx_ext); + WRITE_ONCE(sc->ptp_sample_rx.tsf64, tsf_snap); + WRITE_ONCE(sc->ptp_sample_rx.tstamp, rs.rs_tstamp); + WRITE_ONCE(sc->ptp_sample_rx.duration, rx_duration); + WRITE_ONCE(sc->ptp_sample_rx.read_dur_ns, read_dur_ns); + 
WRITE_ONCE(sc->ptp_sample_rx.ext, rx_ext); + WRITE_ONCE(sc->ptp_sample_rx.rebase_cnt, + READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(sc->ptp_sample_rx.tsf_offset, snap_offset); + WRITE_ONCE(sc->ptp_sample_rx.tsf_offset_ns_rem, snap_offset_rem); + WRITE_ONCE(sc->ptp_sample_rx.epoch, snap_epoch); + WRITE_ONCE(sc->ptp_sample_rx.epoch_valid, 1); + ath9k_cyc2hwtstamp_sample(sc, skb_hwtstamps(skb), + rx_cycle, &sc->ptp_sample_rx); + hwtstamp_ns = ktime_to_ns(skb_hwtstamps(skb)->hwtstamp); + WRITE_ONCE(sc->ptp_last_rx_hwtstamp_ns, hwtstamp_ns); + WRITE_ONCE(sc->ptp_last_rx_mactime, rxs->mactime); + WRITE_ONCE(sc->ptp_last_rx_tsf64, tsf_snap); + WRITE_ONCE(sc->ptp_last_rx_tstamp, rs.rs_tstamp); + if (is_ptp) { + WRITE_ONCE(sc->ptp_last_rx_tsf_offset, + snap_offset); + WRITE_ONCE(sc->ptp_last_rx_rebase, + READ_ONCE(sc->ptp_wrap_rebase_cnt)); + } + if (is_ptp) { + u64 now_ns = ktime_get_ns(); + u64 prev_ns; + s64 delta_ns; + + WRITE_ONCE(sc->ptp_rx_ptp_seen, + READ_ONCE(sc->ptp_rx_ptp_seen) + 1); + WRITE_ONCE(sc->ptp_last_event_ns, now_ns); + WRITE_ONCE(sc->ptp_last_event_rx_ns, now_ns); + WRITE_ONCE(sc->ptp_last_rx_msgtype, msg_type); + WRITE_ONCE(sc->ptp_last_rx_seqid, seq_id); + if (READ_ONCE(sc->ptp_stall_enable)) { + prev_ns = READ_ONCE(sc->ptp_stall_last_rx_ns); + if (prev_ns) { + delta_ns = (s64)hwtstamp_ns - + (s64)prev_ns; + WRITE_ONCE(sc->ptp_stall_last_rx_delta_ns, + delta_ns); + if (delta_ns < + (s64)READ_ONCE(sc->ptp_stall_thresh_ns)) + WRITE_ONCE(sc->ptp_stall_rx_cnt, + READ_ONCE(sc->ptp_stall_rx_cnt) + 1); + } + WRITE_ONCE(sc->ptp_stall_last_rx_ns, + hwtstamp_ns); + } + if (msg_type < ATH9K_PTP_MSGTYPE_MAX) + WRITE_ONCE(sc->ptp_rx_type_seen[msg_type], + READ_ONCE(sc->ptp_rx_type_seen[msg_type]) + 1); + if (hwtstamp_ns) + WRITE_ONCE(sc->ptp_rx_hwts_done, + READ_ONCE(sc->ptp_rx_hwts_done) + 1); + else + WRITE_ONCE(sc->ptp_rx_hwts_miss, + READ_ONCE(sc->ptp_rx_hwts_miss) + 1); + if (msg_type < ATH9K_PTP_MSGTYPE_MAX) { + if (hwtstamp_ns) + 
WRITE_ONCE(sc->ptp_rx_type_done[msg_type], + READ_ONCE(sc->ptp_rx_type_done[msg_type]) + 1); + else + WRITE_ONCE(sc->ptp_rx_type_miss[msg_type], + READ_ONCE(sc->ptp_rx_type_miss[msg_type]) + 1); + } + } + if (!READ_ONCE(sc->ptp_evtlog_ptp_only) || is_ptp) { + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_RX, tsf, + rs.rs_tstamp, 0, + rx_ext, + hwtstamp_ns, + &sc->ptp_sample_rx, + skb); + } + if (is_ptp) { + struct ath9k_ptp_handoff_rx_meta rx_meta = { + .rx_rxs_flags = rxs->flag, + .rx_ampdu_reference = rxs->ampdu_reference, + .rx_enc_flags = rxs->enc_flags, + .rx_rs_status = rs.rs_status, + .rx_rs_flags = rs.rs_flags, + .rx_phyerr = rs.rs_phyerr, + .rx_rssi = rs.rs_rssi, + .rx_signal = rxs->signal, + .rx_keyix = rs.rs_keyix, + .rx_rate = rs.rs_rate, + .rx_rate_idx = rxs->rate_idx, + .rx_antenna = rs.rs_antenna, + .rx_bw = rxs->bw, + .rx_nss = rxs->nss, + .rx_chains = rxs->chains, + .rx_more = rs.rs_more, + .rx_isaggr = rs.rs_isaggr, + .rx_firstaggr = rs.rs_firstaggr, + .rx_moreaggr = rs.rs_moreaggr, + .rx_num_delims = rs.rs_num_delims, + }; + + handoff_ns = ktime_get_ns(); + ath9k_ptp_handoff_add(sc, ATH9K_PTP_HANDOFF_RX, + msg_type, seq_id, + handoff_ns, hwtstamp_ns, + &sc->ptp_sample_rx, + skb, &rx_meta, NULL); + } + } - ieee80211_rx(hw, skb); requeue_drop_frag: diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 028c460ccb05..cdc4cfc8a904 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -1113,7 +1113,6 @@ enum { #define AR_PCIE_SERDES2 0x4044 #define AR_PCIE_PM_CTRL(_ah) (AR_SREV_9340(_ah) ? 
0x4004 : 0x4014) -// focus: wiptp #define AR_PCIE_PM_CTRL_ENA 0x00040000 // FIXME: default -> 0x00080000 #define AR_PCIE_PHY_REG3 0x18c08 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1f7c05fa3727..a471183c6cfc 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -14,14 +14,10 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -// focus: wiptp -#include - #include #include "ath9k.h" #include "ar9003_mac.h" -// focus: wiptp #include "../ath.h" #define BITS_PER_BYTE 8 @@ -46,7 +42,6 @@ #define ATH9K_PWRTBL_11NA_HT_SHIFT 8 #define ATH9K_PWRTBL_11NG_HT_SHIFT 12 - static u16 bits_per_symbol[][2] = { /* 20MHz 40MHz */ { 26, 54 }, /* 0: BPSK */ @@ -209,21 +204,16 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - // focus: wiptp int i; if (!ath_merge_ratetbl(sta, bf, tx_info)) ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates, ARRAY_SIZE(bf->rates)); - // focus: wiptp - // for (i = 0; i < ARRAY_SIZE(bf->rates); ++i) { - // bf->rates[i].idx = 0x0; - // } // FIXME: set constant index 0 (mcs0) for all rates } static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, - struct sk_buff *skb) + struct sk_buff *skb) { struct ath_frame_info *fi = get_frame_info(skb); int q = fi->txq; @@ -320,8 +310,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) list_add_tail(&bf->list, &bf_head); - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "ath_tx_flush_tid skb=%p\n", skb); // TODO: only debug ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0); } @@ -402,7 +390,6 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, } list_add_tail(&bf->list, &bf_head); - // ath_warn(ath9k_hw_common(sc->sc_ah), "ath_tid_drain, skb=%p\n", skb); // TODO: only debug ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0); } } @@ -528,9 
+515,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, int bar_index = -1; skb = bf->bf_mpdu; - //TODO: in older kernels: "hdr = (struct ieee80211_hdr *)skb->data;" in this place - // focus: - // ath_warn(ath9k_hw_common(sc->sc_ah), "ath9k: ath_tx_complete_aggr skb=%p\n", skb); // TODO: only debug tx_info = IEEE80211_SKB_CB(skb); @@ -791,8 +775,6 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts, sta); } - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "ath_tx_process_buffer: skb=%p\n", bf->bf_mpdu); // TODO: only debug ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok); } else @@ -1041,8 +1023,6 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, list_add(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf); - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "%s: skb=%p\n", __FUNCTION__, bf->bf_mpdu); // TODO: only debug ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0); continue; @@ -1435,7 +1415,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) return htype; } -// focus: wiptp static u64 pkt_counter; static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, @@ -1510,14 +1489,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, info.keyix = fi->keyix; info.keytype = fi->keytype; - // focus: wiptp if (info.type == ATH9K_PKT_TYPE_NORMAL) { pkt_counter++; if (pkt_counter % 4 == 0) { - // info.flags |= ATH9K_TXDESC_VEOL; - // printk("ath9k: veol set %#llx\n", pkt_counter); // TODO: only debug } - // printk("ath9k: cbr expired counter 0x%08x linkaddr = 0x%016llx\n", REG_READ(sc->sc_ah, AR_QSTS(info.qcu)), info.link); // TODO: only debug } if (aggr) { @@ -1557,8 +1532,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, bf_prev->bf_next = bf; bf_prev = bf; - // focus: wiptp - if (nframes >= 32) // FIXME: wiptp -> 32 / default -> 2 + if 
(nframes >= 32) break; ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf); @@ -1598,8 +1572,6 @@ static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, return -EBUSY; } - // focus: wiptp - // printk("ath9k: aggr = %d, qdepth = %d\n", aggr, txq->axq_depth); // TODO: only debug ath_set_rates(tid->an->vif, tid->an->sta, bf); if (aggr) @@ -1939,8 +1911,6 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, lastbf = bf->bf_lastbf; list_cut_position(&bf_head, list, &lastbf->list); - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "%s: skb=%p\n", __FUNCTION__, bf->bf_mpdu); // TODO: only debug ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); } @@ -2294,15 +2264,10 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, bf->bf_state.seqno = seqno; } - // focus: wiptp start if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - // ath_warn(common, "ath9k: ath_tx_setup_buffer SKBTX_HW_TSTAMP\n"); // TODO: only debug - // printk("ath9k: ath_tx_setup_buffer SKBTX_HW_TSTAMP\n"); // TODO: only debug skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } - // printk("ath9k: ath_tx_setup_buffer skb=%p, tx_flags: %u\n", skb, skb_shinfo(skb)->tx_flags); // TODO: only debug skb_tx_timestamp(skb); - // focus: wiptp end bf->bf_mpdu = skb; @@ -2344,6 +2309,8 @@ void ath_assign_seq(struct ath_common *common, struct sk_buff *skb) hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); } +static bool ath9k_ptp_is_event_msg(u8 msg_type); + static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_tx_control *txctl) { @@ -2355,6 +2322,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_softc *sc = hw->priv; int frmlen = skb->len + FCS_LEN; int padpos, padsize; + u8 ptp_msg_type = 0xFF; + u16 ptp_seq_id = 0xFFFF; + bool ptp_parsed = false; + bool ptp_hwts = !!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP); /* NOTE: sta can be NULL according to net/mac80211.h */ if (sta) @@ -2369,6 +2340,23 
@@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb); + if (ptp_hwts) + ptp_parsed = ath9k_ptp_parse_skb(skb, &ptp_msg_type, + &ptp_seq_id); + + if (ptp_parsed && ptp_msg_type == ATH9K_PTP_MSGTYPE_DELAY_REQ) + ath9k_ptp_guard_delay_req(sc, ptp_seq_id); + + if (READ_ONCE(sc->ptp_noack_ptp_event_enable) && + ptp_parsed && + ath9k_ptp_is_event_msg(ptp_msg_type)) { + info->flags |= IEEE80211_TX_CTL_NO_ACK; + WRITE_ONCE(sc->ptp_tx_noack_event_cnt, + READ_ONCE(sc->ptp_tx_noack_event_cnt) + 1); + WRITE_ONCE(sc->ptp_tx_noack_last_msgtype, ptp_msg_type); + WRITE_ONCE(sc->ptp_tx_noack_last_seqid, ptp_seq_id); + } + if ((vif && vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_AP_VLAN) || !ieee80211_is_data(hdr->frame_control)) @@ -2535,8 +2523,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); - // focus: wiptp - // ath_warn(common, "ath9k: ath_tx_complete skb=%p\n", skb); // TODO: only debug if (sc->sc_ah->caldata) set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags); @@ -2578,6 +2564,105 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, __skb_queue_tail(&txq->complete_q, skb); } +static u64 ath9k_extend_tsf(u64 tsf, u32 tstamp) +{ + u32 tsf_lower = tsf & 0xffffffff; + u64 full = (tsf & ~0xffffffffULL) | tstamp; + + if (tstamp > tsf_lower && + unlikely(tstamp - tsf_lower > 0x10000000)) + full -= 0x100000000ULL; + + if (tstamp < tsf_lower && + unlikely(tsf_lower - tstamp > 0x10000000)) + full += 0x100000000ULL; + + return full; +} + +#define ATH9K_PTP_WRAP_RESYNC_THRESH_US 0x80000000ULL + +/* Extend 32-bit TX TSF using wrap and glitch guards. 
*/ +static u64 ath9k_ptp_extend_tx(struct ath_softc *sc, u32 tstamp, u64 tsf, + u32 epoch) +{ + unsigned long flags; + u64 ext; + u64 stateless; + u64 diff; + u32 delta; + u32 last; + u64 prev_ext; + + spin_lock_irqsave(&sc->systim_lock, flags); + stateless = ath9k_extend_tsf(tsf, tstamp); + if (!sc->ptp_tx_wrap_valid || + sc->ptp_tx_wrap_epoch != epoch) { + sc->ptp_tx_wrap_valid = 1; + sc->ptp_tx_wrap_epoch = epoch; + sc->ptp_tx_wrap_last = tstamp; + ext = stateless; + sc->ptp_tx_wrap_ext = ext; + sc->ptp_tx_wrap_last_prev = tstamp; + sc->ptp_tx_wrap_delta = 0; + sc->ptp_tx_wrap_delta_s = 0; + sc->ptp_tx_wrap_prev_ext = ext; + } else { + last = sc->ptp_tx_wrap_last; + prev_ext = sc->ptp_tx_wrap_ext; + if (READ_ONCE(sc->ptp_wrap_glitch_thresh) && + tstamp < last) { + u32 back = last - tstamp; + if (back < READ_ONCE(sc->ptp_wrap_glitch_thresh)) { + sc->ptp_tx_wrap_last_prev = last; + sc->ptp_tx_wrap_delta = 0; + sc->ptp_tx_wrap_delta_s = -(s32)back; + sc->ptp_tx_wrap_prev_ext = prev_ext; + ext = prev_ext; + goto check_resync; + } + } + delta = tstamp - last; + sc->ptp_tx_wrap_last_prev = last; + sc->ptp_tx_wrap_delta = delta; + sc->ptp_tx_wrap_delta_s = (s32)delta; + sc->ptp_tx_wrap_prev_ext = prev_ext; + sc->ptp_tx_wrap_last = tstamp; + sc->ptp_tx_wrap_ext = prev_ext + delta; + ext = sc->ptp_tx_wrap_ext; + +check_resync: + diff = (ext > stateless) ? 
(ext - stateless) : + (stateless - ext); + if (unlikely(diff > ATH9K_PTP_WRAP_RESYNC_THRESH_US)) { + sc->ptp_tx_wrap_epoch = epoch; + sc->ptp_tx_wrap_last = tstamp; + sc->ptp_tx_wrap_ext = stateless; + sc->ptp_tx_wrap_delta = 0; + sc->ptp_tx_wrap_delta_s = 0; + ext = stateless; + } + } +out: + spin_unlock_irqrestore(&sc->systim_lock, flags); + + return ext; +} + +static bool ath9k_ptp_is_event_msg(u8 msg_type) +{ + return msg_type <= 0x3; +} + +static bool ath9k_ptp_tx_event_suspect_status(struct ath_tx_status *ts, + u8 msg_type) +{ + if (!ath9k_ptp_is_event_msg(msg_type)) + return false; + + return ts->ts_shortretry || ts->ts_longretry || ts->ts_rateindex; +} + static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, struct ieee80211_sta *sta, @@ -2587,26 +2672,208 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); unsigned long flags; int tx_flags = 0; + bool need_ptp; + bool is_ptp = false; + u8 msg_type = 0xFF; + u16 seq_id = 0xFFFF; + bool need_guard = false; + + need_ptp = READ_ONCE(sc->ptp_evtlog_ptp_only) || + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || + READ_ONCE(sc->ptp_guard_timeout_ms) || + READ_ONCE(sc->ptp_guard_after_event_ms); + if (need_ptp) + is_ptp = ath9k_ptp_parse_skb(skb, &msg_type, &seq_id); + need_guard = is_ptp && (msg_type == ATH9K_PTP_MSGTYPE_DELAY_RESP); + if (is_ptp) { + u64 now_ns = ktime_get_ns(); + + ath9k_ptp_guard_event(sc); + WRITE_ONCE(sc->ptp_tx_ptp_seen, + READ_ONCE(sc->ptp_tx_ptp_seen) + 1); + WRITE_ONCE(sc->ptp_last_event_ns, now_ns); + WRITE_ONCE(sc->ptp_last_event_tx_ns, now_ns); + WRITE_ONCE(sc->ptp_last_tx_msgtype, msg_type); + WRITE_ONCE(sc->ptp_last_tx_seqid, seq_id); + if (msg_type < ATH9K_PTP_MSGTYPE_MAX) + WRITE_ONCE(sc->ptp_tx_type_seen[msg_type], + READ_ONCE(sc->ptp_tx_type_seen[msg_type]) + 1); + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + 
WRITE_ONCE(sc->ptp_tx_tstamp_miss, + READ_ONCE(sc->ptp_tx_tstamp_miss) + 1); + if (msg_type < ATH9K_PTP_MSGTYPE_MAX && + !(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + WRITE_ONCE(sc->ptp_tx_type_miss[msg_type], + READ_ONCE(sc->ptp_tx_type_miss[msg_type]) + 1); + } - // focus: wiptp start if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - struct skb_shared_hwtstamps shhwtstamps; - u64 fns; + struct skb_shared_hwtstamps shhwtstamps; + u64 fns; + u64 tx_ext; + u64 tsf64; + u64 hwtstamp_ns; + u64 handoff_ns = 0; + u32 read_dur_ns = 0; + s64 tsf_offset; + s64 tsf_offset_rem; + u32 epoch = 0; + bool suspect_tx_status = false; + + if (is_ptp) + WRITE_ONCE(sc->ptp_tx_tstamp_req, + READ_ONCE(sc->ptp_tx_tstamp_req) + 1); + if (is_ptp && msg_type < ATH9K_PTP_MSGTYPE_MAX) + WRITE_ONCE(sc->ptp_tx_type_req[msg_type], + READ_ONCE(sc->ptp_tx_type_req[msg_type]) + 1); + + ath9k_ptp_epoch_snapshot(sc, &tsf64, + &tsf_offset, + &tsf_offset_rem, + &epoch, + is_ptp ? &read_dur_ns : NULL); + if (is_ptp) + suspect_tx_status = + ath9k_ptp_tx_event_suspect_status(ts, msg_type); + /* Stateless extend by default; only update PTP wrap state on PTP+epoch match. */ + tx_ext = ath9k_extend_tsf(tsf64, ts->ts_tstamp); + if (is_ptp && epoch == READ_ONCE(sc->ptp_epoch)) + tx_ext = ath9k_ptp_extend_tx(sc, ts->ts_tstamp, + tsf64, epoch); + { + u64 base = tx_ext; + + if (READ_ONCE(sc->ptp_tx_use_duration)) + base += ts->duration; + fns = (base > 16) ? 
(base - 16) : base; + } + WRITE_ONCE(sc->ptp_sample_tx.tsf64, tsf64); + WRITE_ONCE(sc->ptp_sample_tx.tstamp, ts->ts_tstamp); + WRITE_ONCE(sc->ptp_sample_tx.duration, ts->duration); + WRITE_ONCE(sc->ptp_sample_tx.read_dur_ns, read_dur_ns); + WRITE_ONCE(sc->ptp_sample_tx.ext, tx_ext); + WRITE_ONCE(sc->ptp_sample_tx.rebase_cnt, + READ_ONCE(sc->ptp_wrap_rebase_cnt)); + WRITE_ONCE(sc->ptp_sample_tx.tsf_offset, tsf_offset); + WRITE_ONCE(sc->ptp_sample_tx.tsf_offset_ns_rem, tsf_offset_rem); + WRITE_ONCE(sc->ptp_sample_tx.epoch, epoch); + WRITE_ONCE(sc->ptp_sample_tx.epoch_valid, 1); + if (need_guard) + ath9k_ptp_guard_delay_resp(sc, seq_id); + ath9k_cyc2hwtstamp_sample(sc, &shhwtstamps, fns, + &sc->ptp_sample_tx); + hwtstamp_ns = ktime_to_ns(shhwtstamps.hwtstamp); + if (is_ptp) { + WRITE_ONCE(sc->ptp_last_tx_tsf_offset, + tsf_offset); + WRITE_ONCE(sc->ptp_last_tx_rebase, + READ_ONCE(sc->ptp_wrap_rebase_cnt)); + } + if (is_ptp && suspect_tx_status) { + WRITE_ONCE(sc->ptp_tx_suspect_status_cnt, + READ_ONCE(sc->ptp_tx_suspect_status_cnt) + 1); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_msgtype, msg_type); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_seqid, seq_id); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_rateindex, + ts->ts_rateindex); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_shortretry, + ts->ts_shortretry); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_longretry, + ts->ts_longretry); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_status, + ts->ts_status); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_flags, + ts->ts_flags); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_tstamp, + ts->ts_tstamp); + WRITE_ONCE(sc->ptp_tx_suspect_status_last_hwtstamp_ns, + hwtstamp_ns); + } - // FIXME: tb350: useless? ------------------------------------- - // ath9k_cyc2hwtstamp(sc, &shhwtstamps, ts->ts_tstamp); - // shhwtstamps.hwtstamp = ktime_add_us(shhwtstamps.hwtstamp, ts->duration); - // tb350: useless? 
-------------------------------------------- + if (is_ptp) { + if (READ_ONCE(sc->ptp_stall_enable)) { + u64 prev_ns = READ_ONCE(sc->ptp_stall_last_tx_ns); + if (prev_ns) { + s64 delta_ns = (s64)hwtstamp_ns - + (s64)prev_ns; + WRITE_ONCE(sc->ptp_stall_last_tx_delta_ns, + delta_ns); + if (delta_ns < + (s64)READ_ONCE(sc->ptp_stall_thresh_ns)) + WRITE_ONCE(sc->ptp_stall_tx_cnt, + READ_ONCE(sc->ptp_stall_tx_cnt) + 1); + } + WRITE_ONCE(sc->ptp_stall_last_tx_ns, hwtstamp_ns); + } + WRITE_ONCE(sc->ptp_tx_tstamp_done, + READ_ONCE(sc->ptp_tx_tstamp_done) + 1); + WRITE_ONCE(sc->ptp_last_tx_hwtstamp_ns, hwtstamp_ns); + if (msg_type < ATH9K_PTP_MSGTYPE_MAX) + WRITE_ONCE(sc->ptp_tx_type_done[msg_type], + READ_ONCE(sc->ptp_tx_type_done[msg_type]) + 1); + } + /* Guard is armed before timestamp conversion now. */ + if (!READ_ONCE(sc->ptp_evtlog_ptp_only) || is_ptp) { + ath9k_ptp_evtlog_add(sc, ATH9K_PTP_EVT_TX, tsf64, + ts->ts_tstamp, + ts->duration, tx_ext, hwtstamp_ns, + &sc->ptp_sample_tx, skb); + } + if (is_ptp) { + struct ath9k_tx_queue_info txq_info; + struct ath9k_tx_queue_info bcnq_info; + u32 txq_qnum = READ_ONCE(txq->axq_qnum); + u32 beaconq = READ_ONCE(sc->beacon.beaconq); + struct ath9k_ptp_handoff_tx_meta tx_meta = { + .tx_info_flags = tx_info->flags, + .tx_status = ts->ts_status, + .tx_flags = ts->ts_flags, + .tx_hw_queue = tx_info->hw_queue, + .txq_qnum = txq_qnum, + .txq_mac80211_qnum = txq->mac80211_qnum, + .tx_qid = ts->qid, + .tx_tid = ts->tid, + .tx_rateindex = ts->ts_rateindex, + .tx_shortretry = ts->ts_shortretry, + .tx_longretry = ts->ts_longretry, + .tx_rssi = ts->ts_rssi, + .txq_depth = READ_ONCE(txq->axq_depth), + .txq_ampdu_depth = READ_ONCE(txq->axq_ampdu_depth), + .txq_pending_frames = READ_ONCE(txq->pending_frames), + .bcnq_qnum = 0xff, + }; + + if (txq_qnum < ATH9K_NUM_TX_QUEUES && + ath9k_hw_get_txq_props(sc->sc_ah, txq_qnum, + &txq_info)) { + tx_meta.txq_aifs = txq_info.tqi_aifs; + tx_meta.txq_cwmin = txq_info.tqi_cwmin; + tx_meta.txq_cwmax = 
txq_info.tqi_cwmax; + tx_meta.txq_burst_time = txq_info.tqi_burstTime; + tx_meta.txq_ready_time = txq_info.tqi_readyTime; + } - fns = (u64)(ts->ts_tstamp + ts->duration - 16); // TODO: wiptp repo (magic # ?) - // fns = (u64)(ts->ts_tstamp + ts->duration); // TODO: trying this line - shhwtstamps.hwtstamp = ns_to_ktime(fns); + if (beaconq < ATH9K_NUM_TX_QUEUES && + ath9k_hw_get_txq_props(sc->sc_ah, beaconq, + &bcnq_info)) { + tx_meta.bcnq_qnum = beaconq; + tx_meta.bcnq_aifs = bcnq_info.tqi_aifs; + tx_meta.bcnq_cwmin = bcnq_info.tqi_cwmin; + tx_meta.bcnq_cwmax = bcnq_info.tqi_cwmax; + tx_meta.bcnq_burst_time = bcnq_info.tqi_burstTime; + tx_meta.bcnq_ready_time = bcnq_info.tqi_readyTime; + } - skb_tstamp_tx(skb, &shhwtstamps); + handoff_ns = ktime_get_ns(); + ath9k_ptp_handoff_add(sc, ATH9K_PTP_HANDOFF_TX, + msg_type, seq_id, + handoff_ns, hwtstamp_ns, + &sc->ptp_sample_tx, + skb, NULL, &tx_meta); + } - // printk("ath9k: ts->ts_tstamp: %lu | shhwtstamps.hwtstamp: %lld\n", ts->ts_tstamp, ktime_to_ns(shhwtstamps.hwtstamp)); // tb350: debug - } - // focus: wiptp end## + skb_tstamp_tx(skb, &shhwtstamps); + } if (!txok) tx_flags |= ATH_TX_ERROR; @@ -2781,8 +3048,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) ath_tx_return_buffer(sc, bf_held); } - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "%s: skb=%p\n", __FUNCTION__, bf->bf_mpdu); // TODO: only debug ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); } @@ -2803,7 +3068,6 @@ void ath_tx_tasklet(struct ath_softc *sc) rcu_read_unlock(); } -// focus: wiptp void ath_tx_edma_tasklet(struct ath_softc *sc, ktime_t *tstamp) // FIXME: unused tstamp { struct ath_tx_status ts; @@ -2856,8 +3120,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc, ktime_t *tstamp) // FIXME: unused bf = list_first_entry(fifo_list, struct ath_buf, list); - // focus: wiptp - // ath_warn(ath9k_hw_common(sc->sc_ah), "%s: skb=%p\n", __FUNCTION__, bf->bf_mpdu); // TODO: only debug if (bf->bf_state.stale) { 
list_del(&bf->list); diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index f27a94c266c3..e35c2f8dc694 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -36,20 +36,6 @@ static int __ath_regd_init(struct ath_regulatory *reg); #define ATH_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0) /* We enable active scan on these a case by case basis by regulatory domain */ -// #define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\ -// NL80211_RRF_NO_IR) -// #define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\ -// NL80211_RRF_NO_IR | \ -// NL80211_RRF_NO_OFDM) - -// /* We allow IBSS on these on a case by case basis by regulatory domain */ -// #define ATH_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\ -// NL80211_RRF_NO_IR) -// #define ATH_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\ -// NL80211_RRF_NO_IR) -// #define ATH_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\ -// NL80211_RRF_NO_IR) - #define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20, 0) #define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20, 0) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c707358d15c..5c52aa765840 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -4073,6 +4075,12 @@ struct ieee80211_prep_tx_info { * @get_et_strings: Ethtool API to get a set of strings to describe stats * and perhaps other supported types of ethtool data-sets. * + * @get_ts_info: Ethtool API to get timestamping capabilities. + * + * @hwtstamp_get: Retrieve hardware timestamping configuration. + * + * @hwtstamp_set: Set hardware timestamping configuration. + * * @mgd_prepare_tx: Prepare for transmitting a management frame for association * before associated. 
In multi-channel scenarios, a virtual interface is * bound to a channel before it is associated, but as it isn't associated @@ -4497,6 +4505,16 @@ struct ieee80211_ops { void (*get_et_strings)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data); + int (*get_ts_info)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_ts_info *info); + int (*hwtstamp_get)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct kernel_hwtstamp_config *cfg); + int (*hwtstamp_set)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); void (*mgd_prepare_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 20b55a7a82d0..987dc9d0f884 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2137,16 +2137,10 @@ static int ieee80211_change_station(struct wiphy *wiphy, if(sdata->vif.type == NL80211_IFTYPE_MESH_POINT){ switch (params->plink_action) { case NL80211_PLINK_ACTION_BLOCK: - spin_lock_bh(&sdata->u.mesh.macfilter_lock); - // pr_info("DEBUG: MACFilter add entry (called from fn ieee80211_change_station)"); - mesh_add_macfilter_entry(sdata, mac, false); - spin_unlock_bh(&sdata->u.mesh.macfilter_lock); + ieee80211_macfilter_add_entry(sdata, mac, false); break; case NL80211_PLINK_ACTION_OPEN: - spin_lock_bh(&sdata->u.mesh.macfilter_lock); - // pr_info("DEBUG: MACFilter remove entry (called from fn ieee80211_change_station)"); - mesh_remove_macfilter_entry(sdata, mac); - spin_unlock_bh(&sdata->u.mesh.macfilter_lock); + ieee80211_macfilter_remove_entry(sdata, mac); break; default: break; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 3578bc03b5dc..af0533d7e342 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -21,6 +21,9 @@ #include "debugfs.h" #include "debugfs_netdev.h" #include "driver-ops.h" +#include "sta_info.h" +#include "linux/uaccess.h" 
+ static ssize_t ieee80211_if_read( void *data, @@ -596,6 +599,189 @@ static ssize_t ieee80211_if_parse_tsf( ieee80211_recalc_dtim(local, sdata); return buflen; } + +static ssize_t ieee80211_if_fmt_mesh_tsf_set_enable( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + return scnprintf(buf, buflen, "%u\n", + sdata->u.mesh.tsf_set_enabled ? 1 : 0); +} + +static ssize_t ieee80211_if_parse_mesh_tsf_set_enable( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + u8 val; + + if (kstrtou8(buf, 0, &val)) + return -EINVAL; + + sdata->u.mesh.tsf_set_enabled = !!val; + return buflen; +} +IEEE80211_IF_FILE_RW(mesh_tsf_set_enable); + +static ssize_t ieee80211_if_fmt_ptp_forward( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + return scnprintf(buf, buflen, "%u\n", + sdata->u.mesh.ptp_forward ? 1 : 0); +} + +static ssize_t ieee80211_if_parse_ptp_forward( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + u8 val; + + if (kstrtou8(buf, 0, &val)) + return -EINVAL; + + sdata->u.mesh.ptp_forward = !!val; + return buflen; +} +IEEE80211_IF_FILE_RW(ptp_forward); + +static const char *ieee80211_ptp_tx_path_name(u8 path) +{ + switch (path) { + case IEEE80211_PTP_TX_PATH_SLOW: + return "slow"; + case IEEE80211_PTP_TX_PATH_STA_FAST: + return "sta_fast"; + case IEEE80211_PTP_TX_PATH_MESH_FAST: + return "mesh_fast"; + default: + return "none"; + } +} + +static const char *ieee80211_iftype_name(enum nl80211_iftype iftype) +{ + switch (iftype) { + case NL80211_IFTYPE_STATION: + return "station"; + case NL80211_IFTYPE_AP: + return "ap"; + case NL80211_IFTYPE_ADHOC: + return "adhoc"; + case NL80211_IFTYPE_MESH_POINT: + return "mesh"; + case NL80211_IFTYPE_OCB: + return "ocb"; + case NL80211_IFTYPE_AP_VLAN: + return "ap_vlan"; + default: + return "other"; + } +} + +static ssize_t ptp_tx_path_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sub_if_data *sdata = 
file->private_data; + char *buf; + ssize_t len, ret; + + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = scnprintf(buf, PAGE_SIZE, + "seen=%lld event=%lld slow=%lld sta_fast=%lld mesh_fast=%lld mesh_fast_eligible=%lld mesh_fast_fallback=%lld sta_present=%lld sta_fast_present=%lld sta_fast_fallback=%lld\n" + "last_path=%s last_iftype=%s last_event=%u last_msgtype=%u last_seq=%u last_ethertype=0x%04x last_sta_present=%u last_sta_fast_present=%u last_mesh_fast_eligible=%u\n", + (long long)atomic64_read(&sdata->ptp_tx_path.seen), + (long long)atomic64_read(&sdata->ptp_tx_path.event_seen), + (long long)atomic64_read(&sdata->ptp_tx_path.slow), + (long long)atomic64_read(&sdata->ptp_tx_path.sta_fast), + (long long)atomic64_read(&sdata->ptp_tx_path.mesh_fast), + (long long)atomic64_read(&sdata->ptp_tx_path.mesh_fast_eligible), + (long long)atomic64_read(&sdata->ptp_tx_path.mesh_fast_fallback), + (long long)atomic64_read(&sdata->ptp_tx_path.sta_present), + (long long)atomic64_read(&sdata->ptp_tx_path.sta_fast_present), + (long long)atomic64_read(&sdata->ptp_tx_path.sta_fast_fallback), + ieee80211_ptp_tx_path_name(READ_ONCE(sdata->ptp_tx_path.last_path)), + ieee80211_iftype_name(READ_ONCE(sdata->ptp_tx_path.last_iftype)), + READ_ONCE(sdata->ptp_tx_path.last_is_event), + READ_ONCE(sdata->ptp_tx_path.last_msgtype), + READ_ONCE(sdata->ptp_tx_path.last_seq), + READ_ONCE(sdata->ptp_tx_path.last_ethertype), + READ_ONCE(sdata->ptp_tx_path.last_sta_present), + READ_ONCE(sdata->ptp_tx_path.last_sta_fast_present), + READ_ONCE(sdata->ptp_tx_path.last_mesh_fast_eligible)); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret; +} + +static ssize_t ptp_tx_path_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sub_if_data *sdata = file->private_data; + + ieee80211_ptp_tx_path_reset(sdata); + return count; +} + +static const struct file_operations 
ptp_tx_path_ops = { + .read = ptp_tx_path_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +static const struct file_operations ptp_tx_path_reset_ops = { + .write = ptp_tx_path_reset_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +static ssize_t ieee80211_if_fmt_ibss_tsf_set_enable( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + return scnprintf(buf, buflen, "%u\n", + sdata->u.ibss.tsf_set_enabled ? 1 : 0); +} + +static ssize_t ieee80211_if_parse_ibss_tsf_set_enable( + struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) +{ + u8 val; + + if (kstrtou8(buf, 0, &val)) + return -EINVAL; + + sdata->u.ibss.tsf_set_enabled = !!val; + return buflen; +} +IEEE80211_IF_FILE_RW(ibss_tsf_set_enable); + +static ssize_t ieee80211_if_fmt_ibss_merge_state( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + const struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + u32 count = READ_ONCE(ifibss->merge_debug_count); + u64 host_ns = READ_ONCE(ifibss->merge_debug_last_host_ns); + u64 beacon_tsf = READ_ONCE(ifibss->merge_debug_last_beacon_tsf); + u64 rx_tsf = READ_ONCE(ifibss->merge_debug_last_rx_tsf); + s64 diff = READ_ONCE(ifibss->merge_debug_last_diff); + u8 from_rx_status = READ_ONCE(ifibss->merge_debug_last_from_rx_status); + u8 bssid[ETH_ALEN]; + u8 sa[ETH_ALEN]; + + ether_addr_copy(bssid, ifibss->merge_debug_last_bssid); + ether_addr_copy(sa, ifibss->merge_debug_last_sa); + + return scnprintf(buf, buflen, + "count=%u host_ns=%llu beacon_tsf=%llu rx_tsf=%llu diff=%lld source=%s bssid=%pM sa=%pM\n", + count, + (unsigned long long)host_ns, + (unsigned long long)beacon_tsf, + (unsigned long long)rx_tsf, + (long long)diff, + from_rx_status ? 
"rx_status" : "drv_get_tsf", + bssid, sa); +} +IEEE80211_IF_FILE_R(ibss_merge_state); IEEE80211_IF_FILE_RW(tsf); static ssize_t ieee80211_if_fmt_valid_links(const struct ieee80211_sub_if_data *sdata, @@ -690,31 +876,41 @@ IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC); #endif -static ssize_t mesh_macfilter_read( struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t macfilter_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) { struct ieee80211_sub_if_data *sdata = file->private_data; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - + struct list_head *macfilter; + spinlock_t *macfilter_lock; char *buf; unsigned int offset = 0, buf_size = PAGE_SIZE; ssize_t r; - + + macfilter = ieee80211_macfilter_list(sdata); + macfilter_lock = ieee80211_macfilter_lock(sdata); + if (!macfilter || !macfilter_lock) + return -EOPNOTSUPP; + buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; - - struct mesh_macfilter_struct *entry; - // Traverse the macfilter and format the entries into a buffer - list_for_each_entry(entry, &ifmsh->macfilter, list){ - offset += scnprintf(buf + offset, buf_size - offset, - "MAC: %02x:%02x:%02x:%02x:%02x:%02x, Blocked: %s\n", - entry->mac_addr[0], entry->mac_addr[1], - entry->mac_addr[2], entry->mac_addr[3], - entry->mac_addr[4], entry->mac_addr[5], - entry->blocked ? "Yes" : "No"); + + spin_lock_bh(macfilter_lock); + { + struct mesh_macfilter_struct *entry; + + list_for_each_entry(entry, macfilter, list) { + offset += scnprintf(buf + offset, buf_size - offset, + "MAC: %02x:%02x:%02x:%02x:%02x:%02x, Blocked: %s\n", + entry->mac_addr[0], entry->mac_addr[1], + entry->mac_addr[2], entry->mac_addr[3], + entry->mac_addr[4], entry->mac_addr[5], + entry->blocked ? 
"Yes" : "No"); + if (offset >= buf_size) + break; + } } + spin_unlock_bh(macfilter_lock); r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); @@ -723,8 +919,60 @@ static ssize_t mesh_macfilter_read( struct file *file, return r; } -static const struct file_operations mesh_macfilter_ops = { - .read = mesh_macfilter_read, +static ssize_t macfilter_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sub_if_data *sdata = file->private_data; + char buf[64]; + u8 mac_addr[ETH_ALEN]; + char *cmd, *arg, *input; + + if (!ieee80211_macfilter_supported(sdata)) + return -EOPNOTSUPP; + + if (!count || count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = '\0'; + input = strim(buf); + if (!*input) + return -EINVAL; + + if (!strcmp(input, "clear")) { + ieee80211_macfilter_clear(sdata); + return count; + } + + cmd = strsep(&input, " \t"); + if (!input) + return -EINVAL; + + arg = strim(input); + if (!*arg || !mac_pton(arg, mac_addr)) + return -EINVAL; + + if (!strcmp(cmd, "add")) + ieee80211_macfilter_add_entry(sdata, mac_addr, false); + else if (!strcmp(cmd, "block")) + ieee80211_macfilter_add_entry(sdata, mac_addr, true); + else if (!strcmp(cmd, "del") || !strcmp(cmd, "remove")) + ieee80211_macfilter_remove_entry(sdata, mac_addr); + else + return -EINVAL; + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC && + (strcmp(cmd, "del") && strcmp(cmd, "remove"))) + sta_info_destroy_addr_bss(sdata, mac_addr); + + return count; +} + +static const struct file_operations macfilter_ops = { + .read = macfilter_read, + .write = macfilter_write, .open = simple_open, .llseek = generic_file_llseek, }; @@ -758,6 +1006,10 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); DEBUGFS_ADD(hw_queues); + debugfs_create_file("ptp_tx_path", 0400, sdata->vif.debugfs_dir, + 
sdata, &ptp_tx_path_ops); + debugfs_create_file("ptp_tx_path_reset", 0200, sdata->vif.debugfs_dir, + sdata, &ptp_tx_path_reset_ops); if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && sdata->vif.type != NL80211_IFTYPE_NAN) @@ -801,6 +1053,10 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata) static void add_ibss_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); + DEBUGFS_ADD_MODE(ibss_tsf_set_enable, 0600); + DEBUGFS_ADD(ibss_merge_state); + debugfs_create_file("macfilter", 0600, sdata->vif.debugfs_dir, + sdata, &macfilter_ops); } #ifdef CONFIG_MAC80211_MESH @@ -809,13 +1065,15 @@ static void add_mesh_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); DEBUGFS_ADD_MODE(estab_plinks, 0400); + DEBUGFS_ADD_MODE(mesh_tsf_set_enable, 0600); } -// adding file for macfilter static void add_mesh_macfilter(struct ieee80211_sub_if_data *sdata) { - debugfs_create_file("mesh_macfilter", 0400, sdata->vif.debugfs_dir, - sdata, &mesh_macfilter_ops); + debugfs_create_file("mesh_macfilter", 0600, sdata->vif.debugfs_dir, + sdata, &macfilter_ops); + debugfs_create_file("macfilter", 0600, sdata->vif.debugfs_dir, + sdata, &macfilter_ops); } static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) @@ -859,6 +1117,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(dot11MeshHWMPRootMode); MESHPARAMS_ADD(dot11MeshHWMPRannInterval); MESHPARAMS_ADD(dot11MeshForwarding); + MESHPARAMS_ADD(ptp_forward); MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); MESHPARAMS_ADD(rssi_threshold); MESHPARAMS_ADD(ht_opmode); @@ -888,12 +1147,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata) switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: #ifdef CONFIG_MAC80211_MESH - add_mesh_files(sdata); - add_mesh_stats(sdata); - add_mesh_config(sdata); - // dding file for macfilter - add_mesh_macfilter(sdata); -#endif + add_mesh_files(sdata); + add_mesh_stats(sdata); + add_mesh_config(sdata); 
+ add_mesh_macfilter(sdata); + #endif break; case NL80211_IFTYPE_STATION: add_sta_files(sdata); @@ -946,6 +1204,7 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) sdata->local->hw.wiphy->debugfsdir); sdata->debugfs.subdir_stations = debugfs_create_dir("stations", sdata->vif.debugfs_dir); + ieee80211_ptp_tx_path_reset(sdata); add_files(sdata); if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 30cd0c905a24..3ec431565c62 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -243,6 +243,14 @@ void drv_set_tsf(struct ieee80211_local *local, if (!check_sdata_in_driver(sdata)) return; + if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT && + !sdata->u.mesh.tsf_set_enabled) + return; + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC && + !sdata->u.ibss.tsf_set_enabled) + return; + trace_drv_set_tsf(local, sdata, tsf); if (local->ops->set_tsf) local->ops->set_tsf(&local->hw, &sdata->vif, tsf); diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c4505593ba7a..fb8c8bf00fbb 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -84,6 +84,40 @@ static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, return rv; } +static inline int drv_get_ts_info(struct ieee80211_sub_if_data *sdata, + struct ethtool_ts_info *info) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->ops->get_ts_info) + return -EOPNOTSUPP; + + return local->ops->get_ts_info(&local->hw, &sdata->vif, info); +} + +static inline int drv_hwtstamp_get(struct ieee80211_sub_if_data *sdata, + struct kernel_hwtstamp_config *cfg) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->ops->hwtstamp_get) + return -EOPNOTSUPP; + + return local->ops->hwtstamp_get(&local->hw, &sdata->vif, cfg); +} + +static inline int drv_hwtstamp_set(struct ieee80211_sub_if_data *sdata, + struct kernel_hwtstamp_config *cfg, + struct 
netlink_ext_ack *extack) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->ops->hwtstamp_set) + return -EOPNOTSUPP; + + return local->ops->hwtstamp_set(&local->hw, &sdata->vif, cfg, extack); +} + int drv_start(struct ieee80211_local *local); void drv_stop(struct ieee80211_local *local); diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index a3830d925cc2..133c0fe6bb9a 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -8,6 +8,7 @@ * Copyright (C) 2018, 2022 Intel Corporation */ #include +#include #include #include "ieee80211_i.h" #include "sta_info.h" @@ -223,6 +224,36 @@ static int ieee80211_get_regs_len(struct net_device *dev) return 0; } +static int ieee80211_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int ret; + + ret = drv_get_ts_info(sdata, info); + if (ret) + return ethtool_op_get_ts_info(dev, info); + + return 0; +} + +static int ieee80211_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.mdio_support = 0; + + return 0; +} + static void ieee80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) @@ -238,6 +269,8 @@ const struct ethtool_ops ieee80211_ethtool_ops = { .get_regs_len = ieee80211_get_regs_len, .get_regs = ieee80211_get_regs, .get_link = ethtool_op_get_link, + .get_ts_info = ieee80211_get_ts_info, + .get_link_ksettings = ieee80211_get_link_ksettings, .get_ringparam = ieee80211_get_ringparam, .set_ringparam = ieee80211_set_ringparam, .get_strings = ieee80211_get_strings, diff --git 
a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 5542c93edfba..ac8823b08f17 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -34,6 +35,24 @@ #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 +static void +ieee80211_ibss_note_merge(struct ieee80211_sub_if_data *sdata, + const struct ieee80211_mgmt *mgmt, + u64 beacon_timestamp, u64 rx_timestamp, + bool from_rx_status) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + ifibss->merge_debug_count++; + ifibss->merge_debug_last_host_ns = ktime_get_ns(); + ifibss->merge_debug_last_beacon_tsf = beacon_timestamp; + ifibss->merge_debug_last_rx_tsf = rx_timestamp; + ifibss->merge_debug_last_diff = (s64)beacon_timestamp - (s64)rx_timestamp; + ifibss->merge_debug_last_from_rx_status = from_rx_status; + ether_addr_copy(ifibss->merge_debug_last_bssid, mgmt->bssid); + ether_addr_copy(ifibss->merge_debug_last_sa, mgmt->sa); +} + static struct beacon_data * ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, const int beacon_int, const u32 basic_rates, @@ -1027,6 +1046,10 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, rates_updated = true; } } else { + if (ieee80211_macfilter_check(sdata, mgmt->sa)) { + rcu_read_unlock(); + return; + } rcu_read_unlock(); sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); @@ -1112,6 +1135,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss *bss; struct ieee80211_channel *channel; u64 beacon_timestamp, rx_timestamp; + bool from_rx_status = false; u32 supp_rates = 0; enum nl80211_band band = rx_status->band; @@ -1165,6 +1189,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, rx_timestamp = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 24); + from_rx_status = true; } else { /* * second best option: get current TSF @@ -1182,13 +1207,18 @@ static void 
ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, jiffies); if (beacon_timestamp > rx_timestamp) { + ieee80211_ibss_note_merge(sdata, mgmt, beacon_timestamp, + rx_timestamp, from_rx_status); ibss_dbg(sdata, "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", mgmt->bssid); ieee80211_sta_join_ibss(sdata, bss); - supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); - ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, - supp_rates); + if (!ieee80211_macfilter_check(sdata, mgmt->sa)) { + supp_rates = ieee80211_sta_get_rates(sdata, elems, band, + NULL); + ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, + supp_rates); + } rcu_read_unlock(); } @@ -1224,6 +1254,9 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; + if (ieee80211_macfilter_check(sdata, addr)) + return; + rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { @@ -1732,7 +1765,10 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + ifibss->tsf_set_enabled = true; timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0); + INIT_LIST_HEAD(&ifibss->macfilter); + spin_lock_init(&ifibss->macfilter_lock); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); wiphy_work_init(&ifibss->csa_connection_drop_work, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ac78a3e9bf66..29e8ef606879 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -40,6 +40,18 @@ extern const struct cfg80211_ops mac80211_config_ops; struct ieee80211_local; struct ieee80211_mesh_fast_tx; +#ifdef CONFIG_MAC80211_DEBUGFS +enum ieee80211_ptp_tx_path_kind { + IEEE80211_PTP_TX_PATH_NONE, + IEEE80211_PTP_TX_PATH_SLOW, + IEEE80211_PTP_TX_PATH_STA_FAST, + IEEE80211_PTP_TX_PATH_MESH_FAST, +}; + +struct ieee80211_sub_if_data; 
+void ieee80211_ptp_tx_path_reset(struct ieee80211_sub_if_data *sdata); +#endif + /* Maximum number of broadcast/multicast frames to buffer when some of the * associated stations are using power saving. */ #define AP_MAX_BC_BUFFER 128 @@ -565,6 +577,8 @@ struct ieee80211_if_managed { struct ieee80211_if_ibss { struct timer_list timer; struct wiphy_work csa_connection_drop_work; + struct list_head macfilter; + spinlock_t macfilter_lock; unsigned long last_scan_completed; @@ -576,6 +590,7 @@ struct ieee80211_if_ibss { bool control_port; bool userspace_handles_dfs; + bool tsf_set_enabled; u8 bssid[ETH_ALEN] __aligned(2); u8 ssid[IEEE80211_MAX_SSID_LEN]; @@ -592,6 +607,14 @@ struct ieee80211_if_ibss { spinlock_t incomplete_lock; struct list_head incomplete_stations; + u32 merge_debug_count; + u64 merge_debug_last_host_ns; + u64 merge_debug_last_beacon_tsf; + u64 merge_debug_last_rx_tsf; + s64 merge_debug_last_diff; + u8 merge_debug_last_from_rx_status; + u8 merge_debug_last_bssid[ETH_ALEN] __aligned(2); + u8 merge_debug_last_sa[ETH_ALEN] __aligned(2); enum { IEEE80211_IBSS_MLME_SEARCH, @@ -699,6 +722,10 @@ struct ieee80211_if_mesh { u8 mesh_cc_id; /* Synchronization Protocol Identifier */ u8 mesh_sp_id; + /* Allow TSF set via drv_set_tsf() */ + bool tsf_set_enabled; + /* Forward PTP frames in mesh data path */ + bool ptp_forward; /* Authentication Protocol Identifier */ u8 mesh_auth_id; /* Local mesh Sequence Number */ @@ -758,6 +785,7 @@ struct ieee80211_if_mesh { int mesh_paths_generation; int mpp_paths_generation; struct mesh_tx_cache tx_cache; + }; #ifdef CONFIG_MAC80211_MESH @@ -1119,6 +1147,28 @@ struct ieee80211_sub_if_data { u16 desired_active_links; #ifdef CONFIG_MAC80211_DEBUGFS + struct { + atomic64_t seen; + atomic64_t event_seen; + atomic64_t slow; + atomic64_t mesh_fast; + atomic64_t sta_fast; + atomic64_t mesh_fast_eligible; + atomic64_t mesh_fast_fallback; + atomic64_t sta_present; + atomic64_t sta_fast_present; + atomic64_t sta_fast_fallback; + u16 
last_seq; + u16 last_ethertype; + u8 last_msgtype; + u8 last_path; + u8 last_iftype; + u8 last_is_event; + u8 last_sta_present; + u8 last_sta_fast_present; + u8 last_mesh_fast_eligible; + } ptp_tx_path; + struct { struct dentry *subdir_stations; struct dentry *default_unicast_key; @@ -2616,6 +2666,16 @@ const char *ieee80211_get_reason_code_string(u16 reason_code); u16 ieee80211_encode_usf(int val); u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type); +bool ieee80211_macfilter_supported(struct ieee80211_sub_if_data *sdata); +struct list_head *ieee80211_macfilter_list(struct ieee80211_sub_if_data *sdata); +spinlock_t *ieee80211_macfilter_lock(struct ieee80211_sub_if_data *sdata); +void ieee80211_macfilter_add_entry(struct ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN], bool blocked); +void ieee80211_macfilter_remove_entry(struct ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN]); +void ieee80211_macfilter_clear(struct ieee80211_sub_if_data *sdata); +bool ieee80211_macfilter_check(struct ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN]); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index be586bc0b5b7..26349617602b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -822,6 +823,23 @@ static int ieee80211_netdev_setup_tc(struct net_device *dev, return drv_net_setup_tc(local, sdata, dev, type, type_data); } +static int ieee80211_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return drv_hwtstamp_set(sdata, cfg, extack); +} + +static int ieee80211_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return 
drv_hwtstamp_get(sdata, cfg); +} + static const struct net_device_ops ieee80211_dataif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, @@ -831,6 +849,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { .ndo_set_mac_address = ieee80211_change_mac, .ndo_get_stats64 = ieee80211_get_stats64, .ndo_setup_tc = ieee80211_netdev_setup_tc, + .ndo_hwtstamp_set = ieee80211_hwtstamp_set, + .ndo_hwtstamp_get = ieee80211_hwtstamp_get, }; static u16 ieee80211_monitor_select_queue(struct net_device *dev, diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 61ec402e6b6e..eff8360fcf17 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1785,6 +1785,8 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) ieee80211_mesh_housekeeping_timer, 0); ifmsh->accepting_plinks = true; + ifmsh->tsf_set_enabled = true; + ifmsh->ptp_forward = true; atomic_set(&ifmsh->mpaths, 0); mesh_rmc_init(sdata); ifmsh->last_preq = jiffies; @@ -1810,6 +1812,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) RCU_INIT_POINTER(ifmsh->beacon, NULL); sdata->vif.bss_conf.bssid = zero_addr; + } void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 5ffd9389aaae..a8ca85a7d0fb 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -13,7 +13,6 @@ #include #include "ieee80211_i.h" - /* Data structures */ /** * enum mesh_path_flags - mac80211 mesh path flags @@ -313,11 +312,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status); void mesh_sta_cleanup(struct sta_info *sta); -void mesh_add_macfilter_entry(struct ieee80211_sub_if_data *sdata, const u8 mac_addr[ETH_ALEN], bool blocked); -void mesh_remove_macfilter_entry(struct ieee80211_sub_if_data *sdata, const u8 mac_addr[ETH_ALEN]); -void mesh_clear_macfilter(struct ieee80211_sub_if_data *sdata); -bool mesh_check_macfilter(struct ieee80211_sub_if_data 
*sdata, const u8 mac_addr[ETH_ALEN]); - /* Private interfaces */ /* Mesh paths */ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, @@ -397,6 +391,7 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) {} static inline void ieee80211s_stop(void) {} + #endif #endif /* IEEE80211S_H */ diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 87b333c3f80a..3c04a3506843 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -55,86 +55,6 @@ static const char * const mplevents[] = { [CLS_IGNR] = "CLS_IGNR" }; -void mesh_add_macfilter_entry(struct ieee80211_sub_if_data *sdata, const u8 mac_addr[ETH_ALEN], bool blocked){ - struct mesh_macfilter_struct *new_entry, *tmp; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - - list_for_each_entry_safe(new_entry, tmp, &ifmsh->macfilter, list){ - if(memcmp(new_entry->mac_addr, mac_addr, sizeof(new_entry->mac_addr)) == 0){ - return; - } - } - - // Allocate memory for the new entry - new_entry = kmalloc(sizeof(struct mesh_macfilter_struct), GFP_KERNEL); - if(!new_entry){ - pr_err("Failed to allocate memory for new macfilter entry\n"); - return; - } - - // Copy MAC address and set blacked flag - memcpy(new_entry->mac_addr, mac_addr, sizeof(new_entry->mac_addr)); - new_entry->blocked = blocked; - - // Add entry to the llinked list - list_add(&new_entry->list, &ifmsh->macfilter); - - // pr_info("macfilter entry added: %02x:%02x:%02x:%02x:%02x:%02x, Blocked: %s\n", - // mac_addr[0], mac_addr[1], mac_addr[2], - // mac_addr[3], mac_addr[4], mac_addr[5], - // blocked ? 
"Yes" : "No"); - return; -} - -void mesh_remove_macfilter_entry(struct ieee80211_sub_if_data *sdata, const u8 mac_addr[ETH_ALEN]){ - struct mesh_macfilter_struct *entry, *tmp; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - // Traverse the linked list - list_for_each_entry_safe(entry, tmp, &ifmsh->macfilter, list){ - if(memcmp(entry->mac_addr, mac_addr, sizeof(entry->mac_addr)) == 0){ - // Remove the entry from the list - list_del(&entry->list); - - // pr_info("macfilter entry removed: %02x:%02x:%02x:%02x:%02x:%02x\n", - // mac_addr[0], mac_addr[1], mac_addr[2], - // mac_addr[3], mac_addr[4], mac_addr[5]); - - // Free memory allocated for the entry - kfree(entry); - return; - } - } - return; -} - -void mesh_clear_macfilter(struct ieee80211_sub_if_data *sdata){ - struct mesh_macfilter_struct *entry, *tmp; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - // Traverse the macfilter and remove all entries - list_for_each_entry_safe(entry, tmp, &ifmsh->macfilter, list){ - list_del(&entry->list); - kfree(entry); - } - return; -} - -bool mesh_check_macfilter(struct ieee80211_sub_if_data *sdata, const u8 mac_addr[ETH_ALEN]){ - struct mesh_macfilter_struct *entry; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - - // Traverse the macfilter and check if the MAC address is present - list_for_each_entry(entry, &ifmsh->macfilter, list){ - if(memcmp(entry->mac_addr, mac_addr, sizeof(entry->mac_addr)) == 0){ - // pr_info("MAC address %02x:%02x:%02x:%02x:%02x:%02x found in the macfilter\n", - // mac_addr[0], mac_addr[1], mac_addr[2], - // mac_addr[3], mac_addr[4], mac_addr[5]); - entry->blocked = true; - return true; - } - } - return false; -} - /* We only need a valid sta if user configured a minimum rssi_threshold. 
*/ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) @@ -506,6 +426,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0, changed = 0; enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth; + bool should_check_macfilter = false; bool mac_filtered = false; sband = ieee80211_get_sband(sdata); @@ -563,23 +484,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, rate_control_rate_init(sta); else rate_control_rate_update(local, sband, sta, 0, changed); - - if(sta->mesh->plink_state != NL80211_PLINK_BLOCKED){ - spin_lock_bh(&sdata->u.mesh.macfilter_lock); - mac_filtered = mesh_check_macfilter(sdata, sta->sta.addr); - spin_unlock_bh(&sdata->u.mesh.macfilter_lock); - - //need to unlock here, because mesh_plink_block also takes plink_lock - spin_unlock_bh(&sta->mesh->plink_lock); + should_check_macfilter = + sta->mesh->plink_state != NL80211_PLINK_BLOCKED; +out: + spin_unlock_bh(&sta->mesh->plink_lock); - if(mac_filtered){ - // pr_info("DEBUG: mesh_plink_block (called from fn mesh_sta_info_init)"); + if (should_check_macfilter) { + mac_filtered = ieee80211_macfilter_check(sdata, sta->sta.addr); + if (mac_filtered) mesh_plink_block(sta); - } - return; } -out: - spin_unlock_bh(&sta->mesh->plink_lock); } static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata) @@ -889,10 +803,7 @@ u64 mesh_plink_open(struct sta_info *sta) if (!test_sta_flag(sta, WLAN_STA_AUTH)) return 0; - // pr_info("DEBUG: macfilter remove entry (called from mesh_plink_open)"); - spin_lock_bh(&sdata->u.mesh.macfilter_lock); - mesh_remove_macfilter_entry(sdata, sta->sta.addr); - spin_unlock_bh(&sdata->u.mesh.macfilter_lock); + ieee80211_macfilter_remove_entry(sdata, sta->sta.addr); spin_lock_bh(&sta->mesh->plink_lock); sta->mesh->llid = mesh_get_new_llid(sdata); @@ -921,11 +832,7 @@ u64 mesh_plink_block(struct sta_info *sta) struct 
ieee80211_sub_if_data *sdata = sta->sdata; u64 changed; - // pr_info("DEBUG: macfilter add (called from mesh_plink.c fn mesh_plink_block)"); - - spin_lock_bh(&sdata->u.mesh.macfilter_lock); - mesh_add_macfilter_entry(sdata, sta->sta.addr, true); - spin_unlock_bh(&sdata->u.mesh.macfilter_lock); + ieee80211_macfilter_add_entry(sdata, sta->sta.addr, true); changed = mesh_plink_deactivate(sta); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8f6b6f56b65b..998ee1f95afd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -15,7 +15,11 @@ #include #include #include +#include +#include +#include #include +#include #include #include #include @@ -2721,6 +2725,57 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) } #ifdef CONFIG_MAC80211_MESH +static bool ieee80211_mesh_is_ptp(const struct sk_buff *skb, int mesh_hdrlen) +{ + const struct ethhdr *eth = (const struct ethhdr *)skb->data; + unsigned int off = sizeof(*eth) + mesh_hdrlen; + struct vlan_hdr _vh, *vh; + struct iphdr _iph, *iph; + struct ipv6hdr _ip6h, *ip6h; + struct udphdr _uh, *uh; + __be16 proto = eth->h_proto; + + if (proto == htons(ETH_P_1588)) + return true; + + if (proto == htons(ETH_P_8021Q) || proto == htons(ETH_P_8021AD)) { + vh = skb_header_pointer(skb, off, sizeof(_vh), &_vh); + if (!vh) + return false; + proto = vh->h_vlan_encapsulated_proto; + off += sizeof(*vh); + } + + if (proto == htons(ETH_P_1588)) + return true; + + if (proto == htons(ETH_P_IP)) { + iph = skb_header_pointer(skb, off, sizeof(_iph), &_iph); + if (!iph || iph->ihl < 5) + return false; + if (iph->protocol != IPPROTO_UDP) + return false; + off += iph->ihl * 4; + uh = skb_header_pointer(skb, off, sizeof(_uh), &_uh); + if (!uh) + return false; + return uh->dest == htons(319) || uh->dest == htons(320); + } + + if (proto == htons(ETH_P_IPV6)) { + ip6h = skb_header_pointer(skb, off, sizeof(_ip6h), &_ip6h); + if (!ip6h || ip6h->nexthdr != IPPROTO_UDP) + return false; + off += sizeof(*ip6h); + uh = skb_header_pointer(skb, 
off, sizeof(_uh), &_uh); + if (!uh) + return false; + return uh->dest == htons(319) || uh->dest == htons(320); + } + + return false; +} + static bool ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int hdrlen) @@ -2881,6 +2936,13 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta return RX_DROP_MONITOR; } + if (!ifmsh->ptp_forward && + ieee80211_mesh_is_ptp(skb, mesh_hdrlen)) { + if (multicast) + goto rx_accept; + return RX_DROP_MONITOR; + } + skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); if (!multicast && diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d45d4be63dd8..9908aba5adff 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -16,7 +16,10 @@ #include #include #include +#include +#include #include +#include #include #include #include @@ -39,6 +42,166 @@ /* misc utils */ +#ifdef CONFIG_MAC80211_DEBUGFS +struct ieee80211_ptp_tx_obs { + bool valid; + bool is_event; + u8 msgtype; + u16 seqid; + u16 ethertype; +}; + +void +ieee80211_ptp_tx_path_reset(struct ieee80211_sub_if_data *sdata) +{ + atomic64_set(&sdata->ptp_tx_path.seen, 0); + atomic64_set(&sdata->ptp_tx_path.event_seen, 0); + atomic64_set(&sdata->ptp_tx_path.slow, 0); + atomic64_set(&sdata->ptp_tx_path.mesh_fast, 0); + atomic64_set(&sdata->ptp_tx_path.sta_fast, 0); + atomic64_set(&sdata->ptp_tx_path.mesh_fast_eligible, 0); + atomic64_set(&sdata->ptp_tx_path.mesh_fast_fallback, 0); + atomic64_set(&sdata->ptp_tx_path.sta_present, 0); + atomic64_set(&sdata->ptp_tx_path.sta_fast_present, 0); + atomic64_set(&sdata->ptp_tx_path.sta_fast_fallback, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_seq, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_ethertype, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_msgtype, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_path, IEEE80211_PTP_TX_PATH_NONE); + WRITE_ONCE(sdata->ptp_tx_path.last_iftype, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_is_event, 0); + 
WRITE_ONCE(sdata->ptp_tx_path.last_sta_present, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_sta_fast_present, 0); + WRITE_ONCE(sdata->ptp_tx_path.last_mesh_fast_eligible, 0); +} + +static bool +ieee80211_tx_parse_ptp_8023(const struct sk_buff *skb, + struct ieee80211_ptp_tx_obs *obs) +{ + const u8 *data = skb->data; + unsigned int len = skb->len; + unsigned int off = ETH_HLEN; + u16 ethertype; + + memset(obs, 0, sizeof(*obs)); + + if (len < ETH_HLEN) + return false; + + ethertype = ntohs(get_unaligned_be16(data + offsetof(struct ethhdr, h_proto))); + + while (ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD) { + if (len < off + VLAN_HLEN) + return false; + ethertype = ntohs(get_unaligned_be16(data + off + 2)); + off += VLAN_HLEN; + } + + if (ethertype == ETH_P_IP) { + const struct iphdr *iph; + unsigned int ihl; + + if (len < off + sizeof(*iph)) + return false; + iph = (const struct iphdr *)(data + off); + if (iph->version != 4) + return false; + ihl = iph->ihl * 4; + if (ihl < sizeof(*iph) || len < off + ihl) + return false; + if (iph->protocol != IPPROTO_UDP) + return false; + off += ihl; + } else if (ethertype == ETH_P_IPV6) { + const struct ipv6hdr *ip6h; + + if (len < off + sizeof(*ip6h)) + return false; + ip6h = (const struct ipv6hdr *)(data + off); + if (ip6h->nexthdr != IPPROTO_UDP) + return false; + off += sizeof(*ip6h); + } else { + return false; + } + + if (len < off + sizeof(struct udphdr) + 34) + return false; + + { + const struct udphdr *uh = (const struct udphdr *)(data + off); + u16 sport = ntohs(uh->source); + u16 dport = ntohs(uh->dest); + + if ((sport != 319 && sport != 320) && + (dport != 319 && dport != 320)) + return false; + off += sizeof(*uh); + } + + obs->valid = true; + obs->msgtype = data[off] & 0x0f; + obs->is_event = obs->msgtype <= 3; + obs->seqid = ntohs(get_unaligned_be16(data + off + 30)); + obs->ethertype = ethertype; + return true; +} + +static void +ieee80211_ptp_tx_path_record(struct ieee80211_sub_if_data *sdata, + const 
struct ieee80211_ptp_tx_obs *obs, + enum ieee80211_ptp_tx_path_kind path, + bool sta_present, bool sta_fast_present, + bool mesh_fast_eligible, + bool mesh_fast_fallback, + bool sta_fast_fallback) +{ + if (!obs->valid) + return; + + atomic64_inc(&sdata->ptp_tx_path.seen); + if (obs->is_event) + atomic64_inc(&sdata->ptp_tx_path.event_seen); + if (sta_present) + atomic64_inc(&sdata->ptp_tx_path.sta_present); + if (sta_fast_present) + atomic64_inc(&sdata->ptp_tx_path.sta_fast_present); + if (mesh_fast_eligible) + atomic64_inc(&sdata->ptp_tx_path.mesh_fast_eligible); + if (mesh_fast_fallback) + atomic64_inc(&sdata->ptp_tx_path.mesh_fast_fallback); + if (sta_fast_fallback) + atomic64_inc(&sdata->ptp_tx_path.sta_fast_fallback); + + switch (path) { + case IEEE80211_PTP_TX_PATH_SLOW: + atomic64_inc(&sdata->ptp_tx_path.slow); + break; + case IEEE80211_PTP_TX_PATH_STA_FAST: + atomic64_inc(&sdata->ptp_tx_path.sta_fast); + break; + case IEEE80211_PTP_TX_PATH_MESH_FAST: + atomic64_inc(&sdata->ptp_tx_path.mesh_fast); + break; + default: + break; + } + + WRITE_ONCE(sdata->ptp_tx_path.last_seq, obs->seqid); + WRITE_ONCE(sdata->ptp_tx_path.last_ethertype, obs->ethertype); + WRITE_ONCE(sdata->ptp_tx_path.last_msgtype, obs->msgtype); + WRITE_ONCE(sdata->ptp_tx_path.last_path, path); + WRITE_ONCE(sdata->ptp_tx_path.last_iftype, sdata->vif.type); + WRITE_ONCE(sdata->ptp_tx_path.last_is_event, obs->is_event ? 1 : 0); + WRITE_ONCE(sdata->ptp_tx_path.last_sta_present, sta_present ? 1 : 0); + WRITE_ONCE(sdata->ptp_tx_path.last_sta_fast_present, + sta_fast_present ? 1 : 0); + WRITE_ONCE(sdata->ptp_tx_path.last_mesh_fast_eligible, + mesh_fast_eligible ? 
1 : 0); +} +#endif + static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) @@ -4254,20 +4417,48 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct sta_info *sta; struct sk_buff *next; int len = skb->len; +#ifdef CONFIG_MAC80211_DEBUGFS + struct ieee80211_ptp_tx_obs ptp_tx_obs; + bool ptp_is_ptp; + bool ptp_mesh_fast_eligible = false; + bool ptp_mesh_fast_fallback = false; + bool ptp_sta_present = false; + bool ptp_sta_fast_present = false; + bool ptp_sta_fast_fallback = false; +#endif if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { kfree_skb(skb); return; } +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_is_ptp = ieee80211_tx_parse_ptp_8023(skb, &ptp_tx_obs); +#endif + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); rcu_read_lock(); if (ieee80211_vif_is_mesh(&sdata->vif) && - ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT) && - ieee80211_mesh_xmit_fast(sdata, skb, ctrl_flags)) - goto out; + ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT)) { +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_mesh_fast_eligible = ptp_is_ptp; +#endif + if (ieee80211_mesh_xmit_fast(sdata, skb, ctrl_flags)) { +#ifdef CONFIG_MAC80211_DEBUGFS + ieee80211_ptp_tx_path_record(sdata, &ptp_tx_obs, + IEEE80211_PTP_TX_PATH_MESH_FAST, + false, false, + ptp_mesh_fast_eligible, + false, false); +#endif + goto out; + } +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_mesh_fast_fallback = ptp_is_ptp; +#endif + } if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) goto out_free; @@ -4275,6 +4466,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, if (IS_ERR(sta)) sta = NULL; +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_sta_present = ptp_is_ptp && sta; +#endif + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb)); ieee80211_aggr_check(sdata, sta, skb); @@ -4282,12 +4477,38 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct ieee80211_fast_tx *fast_tx; fast_tx = 
rcu_dereference(sta->fast_tx); +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_sta_fast_present = ptp_is_ptp && fast_tx; +#endif - if (fast_tx && - ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) - goto out; + if (fast_tx) { + if (ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) { +#ifdef CONFIG_MAC80211_DEBUGFS + ieee80211_ptp_tx_path_record(sdata, &ptp_tx_obs, + IEEE80211_PTP_TX_PATH_STA_FAST, + ptp_sta_present, + ptp_sta_fast_present, + ptp_mesh_fast_eligible, + ptp_mesh_fast_fallback, + false); +#endif + goto out; + } +#ifdef CONFIG_MAC80211_DEBUGFS + ptp_sta_fast_fallback = ptp_is_ptp; +#endif + } } +#ifdef CONFIG_MAC80211_DEBUGFS + ieee80211_ptp_tx_path_record(sdata, &ptp_tx_obs, + IEEE80211_PTP_TX_PATH_SLOW, + ptp_sta_present, ptp_sta_fast_present, + ptp_mesh_fast_eligible, + ptp_mesh_fast_fallback, + ptp_sta_fast_fallback); +#endif + /* the frame could be fragmented, software-encrypted, and other * things so we cannot really handle checksum or GSO offload. * fix it up in software before we handle anything else. 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8a6917cf63cf..92954204fa81 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -36,6 +36,137 @@ /* privid for wiphys to determine whether they belong to us or not */ const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid; +struct list_head *ieee80211_macfilter_list(struct ieee80211_sub_if_data *sdata) +{ + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: + return &sdata->u.mesh.macfilter; + case NL80211_IFTYPE_ADHOC: + return &sdata->u.ibss.macfilter; + default: + return NULL; + } +} + +spinlock_t *ieee80211_macfilter_lock(struct ieee80211_sub_if_data *sdata) +{ + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: + return &sdata->u.mesh.macfilter_lock; + case NL80211_IFTYPE_ADHOC: + return &sdata->u.ibss.macfilter_lock; + default: + return NULL; + } +} + +bool ieee80211_macfilter_supported(struct ieee80211_sub_if_data *sdata) +{ + return ieee80211_macfilter_list(sdata) && + ieee80211_macfilter_lock(sdata); +} + +void ieee80211_macfilter_add_entry(struct ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN], bool blocked) +{ + struct mesh_macfilter_struct *entry; + struct list_head *macfilter; + spinlock_t *macfilter_lock; + + macfilter = ieee80211_macfilter_list(sdata); + macfilter_lock = ieee80211_macfilter_lock(sdata); + if (!macfilter || !macfilter_lock) + return; + + spin_lock_bh(macfilter_lock); + list_for_each_entry(entry, macfilter, list) { + if (ether_addr_equal(entry->mac_addr, mac_addr)) { + entry->blocked |= blocked; + spin_unlock_bh(macfilter_lock); + return; + } + } + + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + spin_unlock_bh(macfilter_lock); + pr_err("Failed to allocate memory for new macfilter entry\n"); + return; + } + + ether_addr_copy(entry->mac_addr, mac_addr); + entry->blocked = blocked; + list_add(&entry->list, macfilter); + spin_unlock_bh(macfilter_lock); +} + +void ieee80211_macfilter_remove_entry(struct 
ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN]) +{ + struct mesh_macfilter_struct *entry, *tmp; + struct list_head *macfilter; + spinlock_t *macfilter_lock; + + macfilter = ieee80211_macfilter_list(sdata); + macfilter_lock = ieee80211_macfilter_lock(sdata); + if (!macfilter || !macfilter_lock) + return; + + spin_lock_bh(macfilter_lock); + list_for_each_entry_safe(entry, tmp, macfilter, list) { + if (ether_addr_equal(entry->mac_addr, mac_addr)) { + list_del(&entry->list); + kfree(entry); + break; + } + } + spin_unlock_bh(macfilter_lock); +} + +void ieee80211_macfilter_clear(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_macfilter_struct *entry, *tmp; + struct list_head *macfilter; + spinlock_t *macfilter_lock; + + macfilter = ieee80211_macfilter_list(sdata); + macfilter_lock = ieee80211_macfilter_lock(sdata); + if (!macfilter || !macfilter_lock) + return; + + spin_lock_bh(macfilter_lock); + list_for_each_entry_safe(entry, tmp, macfilter, list) { + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(macfilter_lock); +} + +bool ieee80211_macfilter_check(struct ieee80211_sub_if_data *sdata, + const u8 mac_addr[ETH_ALEN]) +{ + struct mesh_macfilter_struct *entry; + struct list_head *macfilter; + spinlock_t *macfilter_lock; + bool blocked = false; + + macfilter = ieee80211_macfilter_list(sdata); + macfilter_lock = ieee80211_macfilter_lock(sdata); + if (!macfilter || !macfilter_lock) + return false; + + spin_lock_bh(macfilter_lock); + list_for_each_entry(entry, macfilter, list) { + if (ether_addr_equal(entry->mac_addr, mac_addr)) { + blocked = true; + break; + } + } + spin_unlock_bh(macfilter_lock); + + return blocked; +} + struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) { struct ieee80211_local *local;