1use cvx_core::error::AnalyticsError;
13
/// Minimum trajectory samples needed for a finite-difference velocity estimate.
const MIN_POINTS_VELOCITY: usize = 2;
/// Minimum trajectory samples needed for a central-difference acceleration estimate.
const MIN_POINTS_ACCELERATION: usize = 3;
/// Minimum trajectory samples needed for the stochastic estimators
/// (realized volatility, Hurst exponent, ADF statistic).
const MIN_POINTS_STOCHASTIC: usize = 10;
20
21pub fn velocity(trajectory: &[(i64, &[f32])], target: i64) -> Result<Vec<f32>, AnalyticsError> {
30 if trajectory.len() < MIN_POINTS_VELOCITY {
31 return Err(AnalyticsError::InsufficientData {
32 needed: MIN_POINTS_VELOCITY,
33 have: trajectory.len(),
34 });
35 }
36
37 let dim = trajectory[0].1.len();
38
39 let idx = find_closest_index(trajectory, target);
41
42 let (t0, v0, t1, v1) = if idx == 0 {
43 (
45 trajectory[0].0,
46 trajectory[0].1,
47 trajectory[1].0,
48 trajectory[1].1,
49 )
50 } else if idx >= trajectory.len() - 1 {
51 let n = trajectory.len();
52 (
53 trajectory[n - 2].0,
54 trajectory[n - 2].1,
55 trajectory[n - 1].0,
56 trajectory[n - 1].1,
57 )
58 } else {
59 (
61 trajectory[idx - 1].0,
62 trajectory[idx - 1].1,
63 trajectory[idx + 1].0,
64 trajectory[idx + 1].1,
65 )
66 };
67
68 let dt = (t1 - t0) as f64;
69 if dt == 0.0 {
70 return Ok(vec![0.0; dim]);
71 }
72
73 let vel: Vec<f32> = (0..dim)
74 .map(|d| ((v1[d] as f64 - v0[d] as f64) / dt) as f32)
75 .collect();
76
77 Ok(vel)
78}
79
80pub fn acceleration(trajectory: &[(i64, &[f32])], target: i64) -> Result<Vec<f32>, AnalyticsError> {
84 if trajectory.len() < MIN_POINTS_ACCELERATION {
85 return Err(AnalyticsError::InsufficientData {
86 needed: MIN_POINTS_ACCELERATION,
87 have: trajectory.len(),
88 });
89 }
90
91 let dim = trajectory[0].1.len();
92 let idx = find_closest_index(trajectory, target).clamp(1, trajectory.len() - 2);
93
94 let (t_prev, v_prev) = (trajectory[idx - 1].0, trajectory[idx - 1].1);
95 let (t_curr, v_curr) = (trajectory[idx].0, trajectory[idx].1);
96 let (t_next, v_next) = (trajectory[idx + 1].0, trajectory[idx + 1].1);
97
98 let dt1 = (t_curr - t_prev) as f64;
99 let dt2 = (t_next - t_curr) as f64;
100 let dt_avg = (dt1 + dt2) / 2.0;
101
102 if dt_avg == 0.0 || dt1 == 0.0 || dt2 == 0.0 {
103 return Ok(vec![0.0; dim]);
104 }
105
106 let acc: Vec<f32> = (0..dim)
107 .map(|d| {
108 let vel1 = (v_curr[d] as f64 - v_prev[d] as f64) / dt1;
109 let vel2 = (v_next[d] as f64 - v_curr[d] as f64) / dt2;
110 ((vel2 - vel1) / dt_avg) as f32
111 })
112 .collect();
113
114 Ok(acc)
115}
116
/// Euclidean (L2) distance between two vectors of equal dimension.
///
/// # Panics
/// Panics if the slices differ in length.
pub fn drift_magnitude_l2(v1: &[f32], v2: &[f32]) -> f32 {
    assert_eq!(v1.len(), v2.len(), "dimension mismatch");
    v1.iter()
        .zip(v2)
        .map(|(a, b)| (a - b).powi(2))
        .sum::<f32>()
        .sqrt()
}
129
/// Cosine drift between two vectors: `1 - cosine_similarity`, clamped so the
/// result lies in `[0, 2]`. Returns `1.0` when either vector has zero norm
/// (a zero vector has no direction).
///
/// # Panics
/// Panics if the slices differ in length.
pub fn drift_magnitude_cosine(v1: &[f32], v2: &[f32]) -> f32 {
    assert_eq!(v1.len(), v2.len(), "dimension mismatch");

    // Single pass accumulating the dot product and both squared norms.
    let mut dot = 0.0f32;
    let mut sq1 = 0.0f32;
    let mut sq2 = 0.0f32;
    for (a, b) in v1.iter().zip(v2) {
        dot += a * b;
        sq1 += a * a;
        sq2 += b * b;
    }

    let norm1 = sq1.sqrt();
    let norm2 = sq2.sqrt();
    if norm1 == 0.0 || norm2 == 0.0 {
        return 1.0;
    }

    1.0 - (dot / (norm1 * norm2)).clamp(-1.0, 1.0)
}
144
/// Summary of the drift between two embedding vectors.
#[derive(Debug, Clone)]
pub struct DriftReport {
    /// Euclidean (L2) distance between the two vectors.
    pub l2_magnitude: f32,
    /// Cosine drift (`1 - cosine_similarity`), in `[0, 2]`.
    pub cosine_drift: f32,
    /// Dimensions with the largest absolute per-component change, as
    /// `(index, |change|)` pairs sorted in descending order of change.
    pub top_dimensions: Vec<(usize, f32)>,
}
155
156pub fn drift_report(v1: &[f32], v2: &[f32], top_n: usize) -> DriftReport {
158 assert_eq!(v1.len(), v2.len(), "dimension mismatch");
159
160 let l2_magnitude = drift_magnitude_l2(v1, v2);
161 let cosine_drift = drift_magnitude_cosine(v1, v2);
162
163 let mut changes: Vec<(usize, f32)> = v1
164 .iter()
165 .zip(v2.iter())
166 .enumerate()
167 .map(|(i, (a, b))| (i, (a - b).abs()))
168 .collect();
169
170 changes.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
171 changes.truncate(top_n);
172
173 DriftReport {
174 l2_magnitude,
175 cosine_drift,
176 top_dimensions: changes,
177 }
178}
179
180pub fn realized_volatility(trajectory: &[(i64, &[f32])]) -> Result<Vec<f32>, AnalyticsError> {
186 if trajectory.len() < MIN_POINTS_STOCHASTIC {
187 return Err(AnalyticsError::InsufficientData {
188 needed: MIN_POINTS_STOCHASTIC,
189 have: trajectory.len(),
190 });
191 }
192
193 let dim = trajectory[0].1.len();
194 let n = trajectory.len() - 1;
195
196 let mut means = vec![0.0f64; dim];
198 let mut returns = Vec::with_capacity(n);
199
200 for w in trajectory.windows(2) {
201 let dt = (w[1].0 - w[0].0).max(1) as f64;
202 let ret: Vec<f64> = (0..dim)
203 .map(|d| (w[1].1[d] as f64 - w[0].1[d] as f64) / dt)
204 .collect();
205 for d in 0..dim {
206 means[d] += ret[d];
207 }
208 returns.push(ret);
209 }
210
211 for m in &mut means {
212 *m /= n as f64;
213 }
214
215 let mut vol = vec![0.0f64; dim];
216 for ret in &returns {
217 for d in 0..dim {
218 let diff = ret[d] - means[d];
219 vol[d] += diff * diff;
220 }
221 }
222
223 let result: Vec<f32> = vol
224 .iter()
225 .map(|v| (v / (n - 1).max(1) as f64).sqrt() as f32)
226 .collect();
227
228 Ok(result)
229}
230
231pub fn hurst_exponent(trajectory: &[(i64, &[f32])]) -> Result<f32, AnalyticsError> {
240 if trajectory.len() < MIN_POINTS_STOCHASTIC {
241 return Err(AnalyticsError::InsufficientData {
242 needed: MIN_POINTS_STOCHASTIC,
243 have: trajectory.len(),
244 });
245 }
246
247 let increments: Vec<f64> = trajectory
249 .windows(2)
250 .map(|w| {
251 let sum_sq: f64 = w[0]
252 .1
253 .iter()
254 .zip(w[1].1.iter())
255 .map(|(&a, &b)| {
256 let d = b as f64 - a as f64;
257 d * d
258 })
259 .sum();
260 sum_sq.sqrt()
261 })
262 .collect();
263
264 let n = increments.len();
266 let mut log_rs = Vec::new();
267 let mut log_n = Vec::new();
268
269 let mut window = 4;
271 while window <= n / 2 {
272 let rs = rescaled_range(&increments, window);
273 if rs > 0.0 {
274 log_rs.push(rs.ln());
275 log_n.push((window as f64).ln());
276 }
277 window *= 2;
278 }
279
280 if log_rs.len() < 2 {
281 return Ok(0.5); }
283
284 let h = linear_regression_slope(&log_n, &log_rs);
286 Ok(h.clamp(0.0, 1.0) as f32)
287}
288
/// Computes a simplified Augmented Dickey-Fuller t-statistic on the L2-norm
/// series of the trajectory (each embedding collapsed to its Euclidean norm).
///
/// The regression is `dy_t = alpha + beta * y_{t-1} + e_t` with no lag or
/// trend terms; the returned value is the t-statistic of `beta`. Strongly
/// negative values suggest the norm series is stationary (mean-reverting);
/// values near zero are consistent with a unit root (random walk).
///
/// # Errors
/// Returns `AnalyticsError::InsufficientData` when fewer than
/// `MIN_POINTS_STOCHASTIC` samples are available.
pub fn adf_statistic(trajectory: &[(i64, &[f32])]) -> Result<f32, AnalyticsError> {
    if trajectory.len() < MIN_POINTS_STOCHASTIC {
        return Err(AnalyticsError::InsufficientData {
            needed: MIN_POINTS_STOCHASTIC,
            have: trajectory.len(),
        });
    }

    // Collapse each embedding to a scalar: its L2 norm.
    let series: Vec<f64> = trajectory
        .iter()
        .map(|(_, v)| {
            let sum_sq: f64 = v.iter().map(|&x| (x as f64) * (x as f64)).sum();
            sum_sq.sqrt()
        })
        .collect();

    let n = series.len();

    // Pass 1: means of the regressand (dy) and the lagged regressor (y_prev).
    let mut sum_dy = 0.0;
    let mut sum_y = 0.0;

    for i in 1..n {
        let dy = series[i] - series[i - 1];
        let y_prev = series[i - 1];
        sum_dy += dy;
        sum_y += y_prev;
    }

    let m = (n - 1) as f64; // number of regression observations
    let mean_y = sum_y / m;
    let mean_dy = sum_dy / m;

    // Pass 2: OLS slope beta = cov(y_prev, dy) / var(y_prev).
    let mut num = 0.0;
    let mut den = 0.0;
    let mut residual_ss = 0.0;

    for i in 1..n {
        let dy = series[i] - series[i - 1];
        let y_prev = series[i - 1];
        num += (y_prev - mean_y) * (dy - mean_dy);
        den += (y_prev - mean_y) * (y_prev - mean_y);
    }

    if den == 0.0 {
        // Constant norm series: the regression is degenerate.
        return Ok(0.0);
    }

    let beta = num / den;

    // Pass 3: residual sum of squares for the standard error of beta.
    for i in 1..n {
        let dy = series[i] - series[i - 1];
        let y_prev = series[i - 1];
        let predicted = mean_dy + beta * (y_prev - mean_y);
        let resid = dy - predicted;
        residual_ss += resid * resid;
    }

    // Standard error of beta; m - 2 degrees of freedom (intercept + slope).
    let se_beta = (residual_ss / (m - 2.0) / den).sqrt();

    if se_beta == 0.0 {
        // Perfect fit: avoid dividing by zero; report "no evidence".
        return Ok(0.0);
    }

    let t_stat = beta / se_beta;
    Ok(t_stat as f32)
}
364
/// Returns the index of the trajectory sample whose timestamp is closest to
/// `target` (the first such index on ties), or `0` for an empty trajectory.
fn find_closest_index(trajectory: &[(i64, &[f32])], target: i64) -> usize {
    trajectory
        .iter()
        .enumerate()
        // `abs_diff` computes |ts - target| in u64 without the intermediate
        // i64 subtraction, which could overflow for extreme timestamps
        // (e.g. ts == i64::MIN with a positive target).
        .min_by_key(|(_, (ts, _))| ts.abs_diff(target))
        .map(|(i, _)| i)
        .unwrap_or(0)
}
376
/// Average rescaled range (R/S) over all complete non-overlapping windows of
/// size `window`. Flat (zero-variance) windows are skipped; returns `0.0`
/// when the data holds no complete window.
fn rescaled_range(data: &[f64], window: usize) -> f64 {
    let n_windows = data.len() / window;
    if n_windows == 0 {
        return 0.0;
    }

    let mut total = 0.0;

    // `chunks_exact` yields exactly `n_windows` full windows and drops any
    // trailing remainder, matching the manual start/end indexing it replaces.
    for chunk in data.chunks_exact(window) {
        let mean = chunk.iter().sum::<f64>() / window as f64;
        let variance = chunk.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / window as f64;
        let std = variance.sqrt();

        if std == 0.0 {
            continue;
        }

        // Range of the cumulative deviations from the window mean.
        let mut running = 0.0;
        let mut highest = f64::NEG_INFINITY;
        let mut lowest = f64::INFINITY;
        for &x in chunk {
            running += x - mean;
            highest = highest.max(running);
            lowest = lowest.min(running);
        }

        total += (highest - lowest) / std;
    }

    total / n_windows as f64
}
415
/// Ordinary least-squares slope of `y` regressed on `x`; returns `0.0` when
/// `x` has no variance (degenerate denominator).
fn linear_regression_slope(x: &[f64], y: &[f64]) -> f64 {
    let count = x.len() as f64;

    // Accumulate sum(x) and sum(x^2) in one fold; the remaining sums use
    // plain iterator reductions.
    let (sx, sxx) = x
        .iter()
        .fold((0.0, 0.0), |(s, ss), &a| (s + a, ss + a * a));
    let sy: f64 = y.iter().sum();
    let sxy: f64 = x.iter().zip(y).map(|(a, b)| a * b).sum();

    let denom = count * sxx - sx * sx;
    if denom == 0.0 {
        return 0.0;
    }

    (count * sxy - sx * sy) / denom
}
431
/// Unit tests covering the kinematic, drift, and stochastic estimators above.
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds the `(timestamp, &[f32])` borrowed trajectory form from owned
    /// test points.
    fn make_trajectory(points: &[(i64, Vec<f32>)]) -> Vec<(i64, &[f32])> {
        points.iter().map(|(t, v)| (*t, v.as_slice())).collect()
    }

    #[test]
    fn velocity_linear_motion() {
        // Position advances 1 unit per 1000 ticks => velocity 0.001 per tick.
        let points: Vec<(i64, Vec<f32>)> = (0..10)
            .map(|i| (i as i64 * 1000, vec![i as f32, 0.0, 0.0]))
            .collect();
        let traj = make_trajectory(&points);

        let vel = velocity(&traj, 5000).unwrap();
        assert!((vel[0] - 0.001).abs() < 1e-6, "vel[0] = {}", vel[0]);
        assert!(vel[1].abs() < 1e-6);
        assert!(vel[2].abs() < 1e-6);
    }

    #[test]
    fn velocity_constant_is_zero() {
        let points: Vec<(i64, Vec<f32>)> = (0..5)
            .map(|i| (i as i64 * 1000, vec![1.0, 2.0, 3.0]))
            .collect();
        let traj = make_trajectory(&points);

        let vel = velocity(&traj, 2000).unwrap();
        for &v in &vel {
            assert!(v.abs() < 1e-6, "expected ~0, got {v}");
        }
    }

    #[test]
    fn velocity_insufficient_data() {
        let points = vec![(0, vec![1.0])];
        let traj = make_trajectory(&points);
        assert!(velocity(&traj, 0).is_err());
    }

    #[test]
    fn acceleration_linear_motion_is_zero() {
        let points: Vec<(i64, Vec<f32>)> = (0..10)
            .map(|i| (i as i64 * 1000, vec![i as f32 * 2.0, 0.0]))
            .collect();
        let traj = make_trajectory(&points);

        let acc = acceleration(&traj, 5000).unwrap();
        for &a in &acc {
            assert!(a.abs() < 1e-6, "expected ~0 for linear motion, got {a}");
        }
    }

    #[test]
    fn acceleration_quadratic_motion() {
        // Position = t^2 in step units => second derivative 2 per step^2,
        // i.e. 2 / 1000^2 per tick^2.
        let points: Vec<(i64, Vec<f32>)> = (0..10)
            .map(|i| {
                let t = i as f32;
                (i as i64 * 1000, vec![t * t])
            })
            .collect();
        let traj = make_trajectory(&points);

        let acc = acceleration(&traj, 5000).unwrap();
        let expected = 2.0 / (1000.0 * 1000.0);
        assert!(
            (acc[0] - expected as f32).abs() < 1e-8,
            "expected ~{expected}, got {}",
            acc[0]
        );
    }

    #[test]
    fn acceleration_insufficient_data() {
        let points = vec![(0, vec![1.0]), (1000, vec![2.0])];
        let traj = make_trajectory(&points);
        assert!(acceleration(&traj, 0).is_err());
    }

    #[test]
    fn drift_stationary_is_zero() {
        let v = vec![1.0, 2.0, 3.0];
        assert_eq!(drift_magnitude_l2(&v, &v), 0.0);
        assert!(drift_magnitude_cosine(&v, &v) < 1e-6);
    }

    #[test]
    fn drift_l2_known_value() {
        let v1 = vec![1.0, 0.0, 0.0];
        let v2 = vec![0.0, 1.0, 0.0];
        let drift = drift_magnitude_l2(&v1, &v2);
        assert!((drift - 2.0f32.sqrt()).abs() < 1e-6);
    }

    #[test]
    fn drift_cosine_orthogonal() {
        let v1 = vec![1.0, 0.0];
        let v2 = vec![0.0, 1.0];
        let drift = drift_magnitude_cosine(&v1, &v2);
        assert!((drift - 1.0).abs() < 1e-6);
    }

    #[test]
    fn drift_report_identifies_top_dimensions() {
        // Dimension 2 changes by 7 and dimension 4 by 5; they should rank
        // first and second.
        let v1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let v2 = vec![1.0, 2.0, 10.0, 4.0, 0.0];
        let report = drift_report(&v1, &v2, 3);
        assert_eq!(report.top_dimensions[0].0, 2);
        assert!((report.top_dimensions[0].1 - 7.0).abs() < 1e-6);
        assert_eq!(report.top_dimensions[1].0, 4);
        assert!((report.top_dimensions[1].1 - 5.0).abs() < 1e-6);
    }

    #[test]
    fn volatility_constant_is_zero() {
        let points: Vec<(i64, Vec<f32>)> =
            (0..20).map(|i| (i as i64 * 1000, vec![1.0, 2.0])).collect();
        let traj = make_trajectory(&points);

        let vol = realized_volatility(&traj).unwrap();
        for &v in &vol {
            assert!(
                v.abs() < 1e-10,
                "constant trajectory should have zero volatility"
            );
        }
    }

    #[test]
    fn volatility_linear_trend() {
        // A perfectly linear drift has constant per-step returns, so the
        // dispersion around the mean return is ~0.
        let points: Vec<(i64, Vec<f32>)> = (0..50)
            .map(|i| (i as i64 * 1000, vec![i as f32 * 0.1]))
            .collect();
        let traj = make_trajectory(&points);

        let vol = realized_volatility(&traj).unwrap();
        assert!(vol[0] < 1e-6, "linear trend vol = {}", vol[0]);
    }

    #[test]
    fn hurst_brownian_motion_approx_05() {
        use rand::Rng;

        let mut rng = rand::rng();
        let n = 1000;
        let mut points: Vec<(i64, Vec<f32>)> = Vec::with_capacity(n);
        let mut x = 0.0f32;

        for i in 0..n {
            x += rng.random::<f32>() - 0.5;
            points.push((i as i64 * 1000, vec![x]));
        }

        let traj = make_trajectory(&points);
        let h = hurst_exponent(&traj).unwrap();

        // Loose tolerance: H is a noisy estimate even on 1000 samples.
        assert!(
            (h - 0.5).abs() < 0.2,
            "Hurst exponent = {h}, expected ~0.5 for BM"
        );
    }

    #[test]
    fn adf_stationary_series() {
        // Ornstein-Uhlenbeck-style mean reversion toward mu.
        let n = 200;
        let mut points: Vec<(i64, Vec<f32>)> = Vec::with_capacity(n);
        let mut x = 0.0f32;
        let theta = 0.5;
        let mu = 1.0f32;
        let mut rng = rand::rng();

        for i in 0..n {
            x += theta * (mu - x) + 0.1 * (rand::Rng::random::<f32>(&mut rng) - 0.5);
            points.push((i as i64 * 1000, vec![x]));
        }

        let traj = make_trajectory(&points);
        let stat = adf_statistic(&traj).unwrap();

        assert!(
            stat < 0.0,
            "ADF statistic = {stat}, expected negative for stationary series"
        );
    }

    #[test]
    fn adf_random_walk_not_strongly_negative() {
        let n = 200;
        let mut points: Vec<(i64, Vec<f32>)> = Vec::with_capacity(n);
        let mut x = 0.0f32;
        let mut rng = rand::rng();

        for i in 0..n {
            x += rand::Rng::random::<f32>(&mut rng) - 0.5;
            points.push((i as i64 * 1000, vec![x]));
        }

        let traj = make_trajectory(&points);
        let stat = adf_statistic(&traj).unwrap();

        // A unit-root series should not produce a strongly negative statistic.
        assert!(
            stat > -5.0,
            "ADF statistic = {stat}, random walk shouldn't be strongly negative"
        );
    }

    #[test]
    fn find_closest_index_exact() {
        let points = vec![(0i64, vec![0.0f32]), (1000, vec![1.0]), (2000, vec![2.0])];
        let traj = make_trajectory(&points);
        assert_eq!(find_closest_index(&traj, 1000), 1);
    }

    #[test]
    fn find_closest_index_between() {
        let points = vec![(0i64, vec![0.0f32]), (1000, vec![1.0]), (2000, vec![2.0])];
        let traj = make_trajectory(&points);
        assert_eq!(find_closest_index(&traj, 1200), 1);
        assert_eq!(find_closest_index(&traj, 1600), 2);
    }

    #[test]
    fn linear_regression_known() {
        let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let y = vec![2.0, 4.0, 6.0, 8.0, 10.0];
        let slope = linear_regression_slope(&x, &y);
        assert!((slope - 2.0).abs() < 1e-10);
    }
}