// calyx_opt/passes/static_inliner.rs

1use crate::analysis::GraphColoring;
2use crate::traversal::{
3    Action, ConstructVisitor, Named, ParseVal, PassOpt, VisResult, Visitor,
4};
5use calyx_frontend::SetAttr;
6use calyx_ir::LibrarySignatures;
7use calyx_ir::structure;
8use calyx_ir::{self as ir, StaticTiming};
9use calyx_utils::CalyxResult;
10use ir::GetAttributes;
11use ir::build_assignments;
12use itertools::Itertools;
13use std::collections::{BTreeMap, HashMap};
14use std::rc::Rc;
15
/// Pass that compiles Static Control into a single Static Enable
/// (registered under the pass name "static-inline"; see the `Named` impl).
pub struct StaticInliner {
    // Set from the "offload-pause" pass option: whether the static FSM
    // pauses while offloading. Per the option's help text, this must stay
    // in sync with the companion compilation pass's option.
    offload_pause: bool,
}
19
20impl Named for StaticInliner {
21    fn name() -> &'static str {
22        "static-inline"
23    }
24
25    fn description() -> &'static str {
26        "Compiles Static Control into a single Static Enable"
27    }
28
29    fn opts() -> Vec<PassOpt> {
30        vec![PassOpt::new(
31            "offload-pause",
32            "Whether to pause the static FSM when offloading. Note that this
33            parameter must be in sync with the static-inliner's offload-pause
34            parameter for compilation to work correctly",
35            ParseVal::Bool(true),
36            PassOpt::parse_bool,
37        )]
38    }
39}
40
41impl ConstructVisitor for StaticInliner {
42    fn from(ctx: &ir::Context) -> CalyxResult<Self> {
43        let opts = Self::get_opts(ctx);
44
45        Ok(StaticInliner {
46            offload_pause: opts["offload-pause"].bool(),
47        })
48    }
49
50    fn clear_data(&mut self) {}
51}
52
53impl StaticInliner {
54    // updates single assignment in the same way `update_assignments_timing` does
55    // adds offset to each timing guard in `assigns`
56    // e.g., %[2,3] with offset = 2 -> %[4,5]
57    // all guards also must update so that guard -> guard & %[offset, offset+latency] since that
58    // is when the group will be active in the control, i.e., dst = guard ? src
59    // becomes dst = guard & %[offset, offset+latency] ? src
60    fn update_assignment_timing(
61        assign: &mut ir::Assignment<ir::StaticTiming>,
62        offset: u64,
63        latency: u64,
64    ) {
65        // adding the offset to each timing interval
66        assign.for_each_interval(|timing_interval| {
67            let (beg, end) = timing_interval.get_interval();
68            Some(ir::Guard::Info(ir::StaticTiming::new((
69                beg + offset,
70                end + offset,
71            ))))
72        });
73        // adding the interval %[offset, offset + latency]
74        assign
75            .guard
76            .add_interval(ir::StaticTiming::new((offset, offset + latency)));
77    }
78
79    // calls update_assignment_timing on each assignment in assigns, which does the following:
80    // adds offset to each timing guard in `assigns`
81    // e.g., %[2,3] with offset = 2 -> %[4,5]
82    // all guards also must update so that guard -> guard & %[offset, offset+latency] since that
83    // is when the group will be active in the control, i.e., dst = guard ? src
84    // becomes dst =  guard & %[offset, offset+latency] ? src
85    // total_latency is the latency of the entire control block being inlined.
86    fn update_assignments_timing(
87        assigns: &mut Vec<ir::Assignment<ir::StaticTiming>>,
88        offset: u64,
89        latency: u64,
90        total_latency: u64,
91    ) {
92        if offset == 0 && latency == total_latency {
93            // In this special case, we do nothing, since the timing guards
94            // would be redundant.
95            return;
96        }
97        for assign in assigns {
98            Self::update_assignment_timing(assign, offset, latency);
99        }
100    }
101
102    // Makes assignments such that if branches can start executing on the first
103    // possible cycle.
104    // essentially, on the first cycle, we write port's value into a `cond` = a register.
105    // this is because the tru/false branch might alter port's value when it executes
106    // cond_wire reads from port on the first cycle, and then cond for the other cycles.
107    // this means that all of the tru branch assigns can get the cond_wire ? in front of them,
108    // and all false branch assigns can get !cond_wire ? in front of them
109    // makes the following assignments:
110    // read more here: https://github.com/calyxir/calyx/issues/1344 (specifically
// the section "Conditional")
    // Builds the condition-latching assignments for a multi-cycle `static if`:
    //   cond:      register latching `port`'s value on cycle 0
    //   cond_wire: wire reading `port` on cycle 0, `cond.out` afterwards
    //   port:      the `static if`'s condition port
    //   latency:   latency of the enclosing `static if`
    // Returns the assignments; callers then guard branch assignments with
    // `cond_wire.out` / `!cond_wire.out`.
    fn make_cond_assigns(
        cond: ir::RRC<ir::Cell>,
        cond_wire: ir::RRC<ir::Cell>,
        port: ir::RRC<ir::Port>,
        latency: u64,
        builder: &mut ir::Builder,
    ) -> Vec<ir::Assignment<ir::StaticTiming>> {
        structure!( builder;
            let signal_on = constant(1,1);
        );
        let mut cond_assigns = vec![];
        // = %[0:1] ? (the first cycle only)
        let cycle_0_guard = ir::Guard::Info(ir::StaticTiming::new((0, 1)));
        // = %[1:latency] ? (every remaining cycle)
        let other_cycles_guard =
            ir::Guard::Info(ir::StaticTiming::new((1, latency)));
        // cond.in = port
        // (unguarded; the register only latches when write_en is high below)
        let cond_gets_port = builder.build_assignment(
            cond.borrow().get("in"),
            Rc::clone(&port),
            ir::Guard::True,
        );
        // cond_wire.in = %0 ? port
        let cond_wire_gets_port = builder.build_assignment(
            cond_wire.borrow().get("in"),
            port,
            cycle_0_guard.clone(),
        );
        cond_assigns.push(cond_gets_port);
        cond_assigns.push(cond_wire_gets_port);
        let asgns = build_assignments!(builder;
            // cond.write_en = %0 ? 1'd1 (since we also have cond.in = %0 ? port)
            // cond_wire.in = %[1:latency] ? cond.out (since we also have cond_wire.in = %0 ? port)
            cond["write_en"] = cycle_0_guard ? signal_on["out"];
            cond_wire["in"] = other_cycles_guard ? cond["out"];
        );
        cond_assigns.extend(asgns.to_vec());
        cond_assigns
    }
150
151    // Given a static control block `sc`, and the current latency returns a
152    // vec of tuples (i,j) which represents all of the intervals (relative to
153    // the current latency) for which the corresponding fsm will be offloading.
// There are two scenarios in which the fsm will be offloading:
155    //   1) All static repeat bodies.
156    //   2) If there is a static par in which different threads have overlapping
157    //      offloads, then we offload the entire static par.
158    fn get_offload_latencies(
159        sc: &ir::StaticControl,
160        cur_latency: u64,
161    ) -> Vec<(u64, u64)> {
162        match sc {
163            ir::StaticControl::Enable(_) | ir::StaticControl::Empty(_) => {
164                vec![]
165            }
166            ir::StaticControl::Seq(ir::StaticSeq { stmts, .. }) => {
167                let mut lat = cur_latency;
168                let mut res = vec![];
169                for stmt in stmts {
170                    res.extend(Self::get_offload_latencies(stmt, lat));
171                    lat += stmt.get_latency();
172                }
173                res
174            }
175            ir::StaticControl::Par(ir::StaticPar { stmts, .. }) => {
176                let mut res = vec![];
177                // If the current static par has overlapping offload intervals,
178                // then push the entire par.
179                if Self::have_overlapping_offloads(sc) {
180                    res.push((cur_latency, cur_latency + sc.get_latency()))
181                } else {
182                    // Othwerwise just recursively look into each statement
183                    // for possible offloads.
184                    for stmt in stmts {
185                        res.extend(Self::get_offload_latencies(
186                            stmt,
187                            cur_latency,
188                        ));
189                    }
190                }
191                res
192            }
193            ir::StaticControl::If(ir::StaticIf {
194                tbranch, fbranch, ..
195            }) => {
196                let mut res = Self::get_offload_latencies(tbranch, cur_latency);
197                res.extend(Self::get_offload_latencies(fbranch, cur_latency));
198                res
199            }
200            ir::StaticControl::Repeat(ir::StaticRepeat {
201                num_repeats,
202                body,
203                ..
204            }) => {
205                let res = vec![(
206                    cur_latency,
207                    cur_latency + num_repeats * body.get_latency(),
208                )];
209                res
210            }
211            ir::StaticControl::Invoke(inv) => {
212                dbg!(inv.comp.borrow().name());
213                todo!("implement static inlining for invokes")
214            }
215        }
216    }
217
218    // Checks whether a given static control block `sc` contains a static
219    // par in which different threads have overlapping offload intervals.
220    // Note that this only checks one layer of nesting once it finds a static par.
221    // So if you want to check a deeper layer of nesting you have to call this
222    // function again on the nested static par.
223    fn have_overlapping_offloads(sc: &ir::StaticControl) -> bool {
224        match sc {
225            ir::StaticControl::Enable(_) | ir::StaticControl::Empty(_) => false,
226            ir::StaticControl::Seq(ir::StaticSeq { stmts, .. }) => {
227                stmts.iter().any(Self::have_overlapping_offloads)
228            }
229            ir::StaticControl::Par(ir::StaticPar { stmts, .. }) => {
230                // For each thread, add vec of offload intervals to the vec.
231                // So we have a vec of (vec of tuples/intervals)
232                let intervals: Vec<_> = stmts
233                    .iter()
234                    .map(|stmt| Self::get_offload_latencies(stmt, 0))
235                    .collect();
236                for (intervals1, intervals2) in
237                    intervals.iter().tuple_combinations()
238                {
239                    for &(start1, end1) in intervals1.iter() {
240                        for &(start2, end2) in intervals2.iter() {
241                            // Overlap if either: interval1 a) starts within
242                            // interval2, b) ends within interval2, or c)
243                            // encompasses interval2 entirely.
244                            if (start2 <= end1 && end1 <= end2)
245                                || (start2 <= start1 && start1 <= end2)
246                                || (start1 <= start2 && end2 <= start2)
247                            {
248                                return true;
249                            }
250                        }
251                    }
252                }
253                false
254                // We don't have to check this
255                // stmts.iter().any(|stmt| Self::have_overlapping_repeats(stmt))
256                // because we will check this later on.
257            }
258            ir::StaticControl::If(ir::StaticIf {
259                tbranch, fbranch, ..
260            }) => {
261                Self::have_overlapping_offloads(tbranch)
262                    || Self::have_overlapping_offloads(fbranch)
263            }
264            ir::StaticControl::Repeat(ir::StaticRepeat { body, .. }) => {
265                Self::have_overlapping_offloads(body)
266            }
267            ir::StaticControl::Invoke(inv) => {
268                dbg!(inv.comp.borrow().name());
269                todo!("implement static inlining for invokes")
270            }
271        }
272    }
273
274    // Increases the latency of static group `sg` to `new_lat`.
275    // `new_lat` must be longer than the existing latency.
276    // Useful to make `static par` threads all have the same latency.
277    fn increase_sgroup_latency(sg: ir::RRC<ir::StaticGroup>, new_lat: u64) {
278        assert!(
279            new_lat >= sg.borrow().get_latency(),
280            "New latency must be bigger than existing latency"
281        );
282        sg.borrow_mut().latency = new_lat;
283        sg.borrow_mut().assignments.iter_mut().for_each(|asssign| {
284            asssign.guard.add_interval(StaticTiming::new((0, new_lat)))
285        });
286    }
287
288    fn get_coloring(par_stmts: &[ir::StaticControl]) -> HashMap<usize, usize> {
289        let mut conflict_graph: GraphColoring<usize> =
290            GraphColoring::from(0..par_stmts.len());
291        // Getting the offload intervals for each thread.
292        let offload_interval_info = par_stmts
293            .iter()
294            .map(|stmt| Self::get_offload_latencies(stmt, 0))
295            .collect_vec();
296        // Build conflict graph, where each thread is represented
297        // by its index in `stmts`
298        for (i, j) in (0..par_stmts.len()).tuple_combinations() {
299            let intervals1 = &offload_interval_info[i];
300            let intervals2 = &offload_interval_info[j];
301            for &(start1, end1) in intervals1.iter() {
302                for &(start2, end2) in intervals2.iter() {
303                    if (start2 <= end1 && end1 <= end2)
304                        || (start2 <= start1 && start1 <= end2)
305                        || (start1 <= start2 && end2 <= end1)
306                    {
307                        // If intervals overlap then insert conflict.
308                        conflict_graph.insert_conflict(&i, &j);
309                    }
310                }
311            }
312        }
313
314        conflict_graph.color_greedy(None, true)
315    }
316
317    // inlines the static control `sc` and returns an equivalent single static group
318    fn inline_static_control(
319        &self,
320        sc: &ir::StaticControl,
321        builder: &mut ir::Builder,
322    ) -> ir::RRC<ir::StaticGroup> {
323        match sc {
324            ir::StaticControl::Enable(ir::StaticEnable { group, .. }) => {
325                Rc::clone(group)
326            }
327            ir::StaticControl::Seq(ir::StaticSeq {
328                stmts,
329                latency,
330                attributes,
331            }) => {
332                let seq_group =
333                    builder.add_static_group("static_seq", *latency);
334                let mut seq_group_assigns: Vec<
335                    ir::Assignment<ir::StaticTiming>,
336                > = vec![];
337                let mut cur_offset = 0;
338                for stmt in stmts {
339                    let stmt_latency = stmt.get_latency();
340                    // first recursively call each stmt in seq, and turn each stmt
341                    // into static group g.
342                    let g = self.inline_static_control(stmt, builder);
343                    assert!(
344                        g.borrow().get_latency() == stmt_latency,
345                        "static group latency doesn't match static stmt latency"
346                    );
347                    // get the assignments from g
348                    // currently we clone, since we might need these assignments elsewhere
349                    // We could probably do some sort of analysis to see when we need to
350                    // clone vs. can drain
351                    let mut g_assigns: Vec<ir::Assignment<ir::StaticTiming>> =
352                        g.borrow_mut().assignments.clone();
353                    // add cur_offset to each static guard in g_assigns
354                    // and add %[offset, offset + latency] to each assignment in
355                    // g_assigns
356                    StaticInliner::update_assignments_timing(
357                        &mut g_assigns,
358                        cur_offset,
359                        stmt_latency,
360                        *latency,
361                    );
362                    // add g_assigns to seq_group_assigns
363                    seq_group_assigns.extend(g_assigns.into_iter());
364                    // updates cur_offset so that next stmt gets its static timing
365                    // offset appropriately
366                    cur_offset += stmt_latency;
367                }
368                assert!(
369                    *latency == cur_offset,
370                    "static group latency doesn't match static seq latency"
371                );
372                seq_group.borrow_mut().assignments = seq_group_assigns;
373                seq_group.borrow_mut().attributes = attributes.clone();
374                seq_group
375            }
376            ir::StaticControl::Par(ir::StaticPar {
377                stmts,
378                latency,
379                attributes,
380            }) => {
381                if !self.offload_pause {
382                    // If we don't pause on offload, we can just do things
383                    // conventionally, similar to static seq.
384                    let par_group =
385                        builder.add_static_group("static_par", *latency);
386                    let mut par_group_assigns: Vec<
387                        ir::Assignment<ir::StaticTiming>,
388                    > = vec![];
389                    for stmt in stmts {
390                        let stmt_latency = stmt.get_latency();
391                        // first recursively call each stmt in par, and turn each stmt
392                        // into static group g.
393                        let g = self.inline_static_control(stmt, builder);
394                        assert!(
395                            g.borrow().get_latency() == stmt_latency,
396                            "static group latency doesn't match static stmt latency"
397                        );
398                        // get the assignments from g
399                        let mut g_assigns: Vec<
400                            ir::Assignment<ir::StaticTiming>,
401                        > = g.borrow_mut().assignments.clone();
402                        // and add %[0, group_latency] to each assignment in g_assigns
403                        StaticInliner::update_assignments_timing(
404                            &mut g_assigns,
405                            0,
406                            stmt_latency,
407                            *latency,
408                        );
409                        // add g_assigns to par_group_assigns
410                        par_group_assigns.extend(g_assigns.into_iter());
411                    }
412                    par_group.borrow_mut().assignments = par_group_assigns;
413                    par_group.borrow_mut().attributes = attributes.clone();
414                    par_group
415                } else {
416                    // We build a conflict graph to figure out which
417                    // `static par` threads can share an FSM (they can do so
418                    // so long as they never offload at the same time).
419                    // To do this we perform a greedy coloring, where nodes=threads
420                    // and threads are represented by their index in `stmts`.
421                    let threads_to_colors = Self::get_coloring(stmts);
422                    let colors_to_threads =
423                        GraphColoring::reverse_coloring(&threads_to_colors);
424                    // Need to know the latency of each color (i.e., the
425                    // maximum latency among all threads of that color.)
426                    let colors_to_latencies: BTreeMap<usize, u64> =
427                        colors_to_threads
428                            .into_iter()
429                            .map(|(color, threads)| {
430                                (
431                                    color,
432                                    threads
433                                        .iter()
434                                        .map(|thread| {
435                                            stmts
436                                                .get(*thread)
437                                                .expect("coloring shouldn't produce unkown threads")
438                                                .get_latency()
439                                        })
440                                        .max()
441                                        .expect("par.stmts shouldn't be empty"),
442                                )
443                            })
444                            .collect();
445
446                    // `thread_assigns` maps colors to the assignments corresponding to the
447                    // color (i.e., the assignments corresponding to the color's
448                    // group of threads.)
449                    let mut color_assigns: BTreeMap<
450                        usize,
451                        Vec<ir::Assignment<ir::StaticTiming>>,
452                    > = BTreeMap::new();
453                    // iterate through stmts to build `color_assigns`.
454                    for (index, stmt) in stmts.iter().enumerate() {
455                        // color_latency should be >= stmt_latency
456                        // (color_latency is max of all threads of the color).
457                        let stmt_latency = stmt.get_latency();
458                        let color_latency = *colors_to_latencies
459                            .get(&threads_to_colors[&index])
460                            .expect("coloring has gone wrong somehow");
461
462                        // recursively turn each stmt in the par block into a group g
463                        // and take its assignments.
464                        let stmt_group =
465                            self.inline_static_control(stmt, builder);
466                        assert!(
467                            stmt_group.borrow().get_latency() == stmt_latency,
468                            "static group latency doesn't match static stmt latency"
469                        );
470                        let mut group_assigns =
471                            stmt_group.borrow().assignments.clone();
472
473                        // If we are combining threads with uneven latencies, then
474                        // for the smaller threads we have to add an implicit guard from
475                        // %[0:smaller latency].
476                        if stmt_latency < color_latency {
477                            group_assigns.iter_mut().for_each(|assign| {
478                                assign.guard.add_interval(StaticTiming::new((
479                                    0,
480                                    stmt_latency,
481                                )))
482                            })
483                        }
484
485                        color_assigns
486                            .entry(*threads_to_colors.get(&index).unwrap())
487                            .or_default()
488                            .extend(group_assigns);
489                    }
490
491                    // Now turn `color_assigns` into `groups` (each color gets
492                    // one group).
493                    let mut color_groups = color_assigns
494                        .into_iter()
495                        .map(|(index, assigns)| {
496                            let thread_group = builder.add_static_group(
497                            "static_par_thread",
498                            *colors_to_latencies.get(&index).expect("something has gone wrong merging par threads"));
499                            thread_group.borrow_mut().assignments = assigns;
500                            thread_group
501                        })
502                        .collect_vec();
503
504                    if color_groups.len() == 1 {
505                        // If we only have one group, no need for a wrapper.
506                        let par_group = color_groups.pop().unwrap();
507                        par_group.borrow_mut().attributes = attributes.clone();
508                        par_group
509                    } else {
510                        // We need a wrapper to fire off each thread independently.
511                        let par_group =
512                            builder.add_static_group("static_par", *latency);
513                        let mut par_group_assigns: Vec<
514                            ir::Assignment<ir::StaticTiming>,
515                        > = vec![];
516                        for group in color_groups {
517                            // If color_latency < latency we need to add guard
518                            // color_group[go] = %[0:color_latency] ? 1'd1;
519                            // However, if color_latency will take the same
520                            // number of bits as latency, then we might as
521                            // well just increase the latency of the group to
522                            // avoid making this guard.
523                            // XXX(Caleb): we don't know whether this will be
524                            // one-hot or binary... should encode some way to
525                            // do this.
526                            if group.borrow().latency + 1 == *latency
527                                || group.borrow().latency + 2 == *latency
528                            {
529                                Self::increase_sgroup_latency(
530                                    Rc::clone(&group),
531                                    *latency,
532                                );
533                            }
534
535                            structure!( builder;
536                                let signal_on = constant(1,1);
537                            );
538
539                            // Making assignment:
540                            // color_group[go] = %[0:color_latency] ? 1'd1;
541                            let stmt_guard =
542                                if group.borrow().latency == *latency {
543                                    ir::Guard::True
544                                } else {
545                                    ir::Guard::Info(ir::StaticTiming::new((
546                                        0,
547                                        group.borrow().get_latency(),
548                                    )))
549                                };
550
551                            let trigger_body = build_assignments!(builder;
552                                group["go"] = stmt_guard ? signal_on["out"];
553                            );
554                            par_group_assigns.extend(trigger_body);
555                        }
556
557                        par_group.borrow_mut().assignments = par_group_assigns;
558                        par_group.borrow_mut().attributes = attributes.clone();
559                        par_group
560                            .borrow_mut()
561                            .attributes
562                            .insert(ir::BoolAttr::ParCtrl, 1);
563
564                        // Building a wrapper that just simply executes `par_group`.
565                        // This group could get thrown away, but thats fine, because
566                        // we've guaranteed that `par_group` will never get thrown
567                        // out.
568                        let par_wrapper = builder
569                            .add_static_group("static_par_wrapper", *latency);
570                        structure!( builder;
571                            let signal_on = constant(1,1);
572                        );
573                        let trigger_body = build_assignments!(builder;
574                            par_group["go"] = ? signal_on["out"];
575                        );
576                        // par_wrapper triggers par_group[go]
577                        par_wrapper
578                            .borrow_mut()
579                            .assignments
580                            .extend(trigger_body);
581                        par_wrapper
582                    }
583                }
584            }
585            ir::StaticControl::If(ir::StaticIf {
586                port,
587                tbranch,
588                fbranch,
589                latency,
590                attributes,
591            }) => {
592                // Making sure max of the two branches latency is the latency
593                // of the if statement
594                let tbranch_latency = tbranch.get_latency();
595                let fbranch_latency = fbranch.get_latency();
596                let max_latency =
597                    std::cmp::max(tbranch_latency, fbranch_latency);
598                assert_eq!(
599                    max_latency, *latency,
600                    "if group latency and max of the if branch latencies do not match"
601                );
602
603                // Inline assignments in tbranch and fbranch, and get resulting
604                // tgroup_assigns and fgroup_assigns
605                let tgroup = self.inline_static_control(tbranch, builder);
606                let mut tgroup_assigns: Vec<ir::Assignment<ir::StaticTiming>> =
607                    tgroup.borrow_mut().assignments.clone();
608                assert_eq!(
609                    tbranch_latency,
610                    tgroup.borrow().get_latency(),
611                    "tru branch and tru branch group latency do not match"
612                );
613                // turn fgroup (if it exists) into group and put assigns into fgroup_assigns
614                let mut fgroup_assigns: Vec<ir::Assignment<ir::StaticTiming>> =
615                    match **fbranch {
616                        ir::StaticControl::Empty(_) => vec![],
617                        _ => {
618                            let fgroup =
619                                self.inline_static_control(fbranch, builder);
620                            assert_eq!(
621                                fbranch_latency,
622                                fgroup.borrow().get_latency(),
623                                "false branch and false branch group latency do not match"
624                            );
625                            let fgroup_assigns: Vec<
626                                ir::Assignment<ir::StaticTiming>,
627                            > = fgroup.borrow_mut().assignments.clone();
628                            fgroup_assigns
629                        }
630                    };
631
632                // if_group = the eventual group we inline all the assignments
633                // into.
634                let if_group = builder.add_static_group("static_if", *latency);
635                let mut if_group_assigns: Vec<
636                    ir::Assignment<ir::StaticTiming>,
637                > = vec![];
638                if *latency == 1 {
639                    // Special case: if latency = 1, we don't need a register
640                    // to hold the value of the cond port.
641                    let cond_port_guard = ir::Guard::Port(Rc::clone(port));
642                    let not_cond_port_guard =
643                        ir::Guard::Not(Box::new(cond_port_guard.clone()));
644                    tgroup_assigns.iter_mut().for_each(|assign| {
645                        // adds the cond_port ? guard
646                        assign
647                            .guard
648                            .update(|guard| guard.and(cond_port_guard.clone()))
649                    });
650                    fgroup_assigns.iter_mut().for_each(|assign| {
651                        // adds the !cond_port ? guard
652                        assign.guard.update(|guard| {
653                            guard.and(not_cond_port_guard.clone())
654                        })
655                    });
656                } else {
657                    // If latency != 1, we do need a register to hold the
658                    // value of the cond port.
659                    structure!( builder;
660                        let cond = prim std_reg(port.borrow().width);
661                        let cond_wire = prim std_wire(port.borrow().width);
662                    );
663                    // build_cond_assigns makes assigns such that
664                    // cond_wire.in can guard all of the tru branch assigns,
665                    // and !cond_wire.in can guard all fo the false branch assigns
666                    let cond_assigns = StaticInliner::make_cond_assigns(
667                        Rc::clone(&cond),
668                        Rc::clone(&cond_wire),
669                        Rc::clone(port),
670                        *latency,
671                        builder,
672                    );
673                    if_group_assigns.extend(cond_assigns.to_vec());
674
675                    // need to do two things:
676                    // add cond_wire.out ? in front of each tgroup assignment
677                    // (and ! cond_wire.out for fgroup assignemnts)
678                    // add %[0:tbranch_latency] in front of each tgroup assignment
679                    // (and %[0: fbranch_latency]) in front of each fgroup assignment
680                    let cond_wire_guard =
681                        ir::Guard::Port(cond_wire.borrow().get("out"));
682                    let not_cond_wire_guard =
683                        ir::Guard::Not(Box::new(cond_wire_guard.clone()));
684                    tgroup_assigns.iter_mut().for_each(|assign| {
685                        // adds the %[0:tbranch_latency] ? guard
686                        Self::update_assignment_timing(
687                            assign,
688                            0,
689                            tbranch_latency,
690                        );
691                        // adds the cond_wire ? guard
692                        assign
693                            .guard
694                            .update(|guard| guard.and(cond_wire_guard.clone()))
695                    });
696                    fgroup_assigns.iter_mut().for_each(|assign| {
697                        // adds the %[0:fbranch_latency] ? guard
698                        Self::update_assignment_timing(
699                            assign,
700                            0,
701                            fbranch_latency,
702                        );
703                        // adds the !cond_wire ? guard
704                        assign.guard.update(|guard| {
705                            guard.and(not_cond_wire_guard.clone())
706                        })
707                    });
708                }
709                if_group_assigns.extend(tgroup_assigns);
710                if_group_assigns.extend(fgroup_assigns);
711                if_group.borrow_mut().assignments = if_group_assigns;
712                if_group.borrow_mut().attributes = attributes.clone();
713                if_group
714            }
715            ir::StaticControl::Repeat(ir::StaticRepeat {
716                latency,
717                num_repeats,
718                body,
719                attributes,
720            }) => {
721                let repeat_group =
722                    builder.add_static_group("static_repeat", *latency);
723                // turn body into a group body_group by recursively calling inline_static_control
724                let body_group = self.inline_static_control(body, builder);
725                assert_eq!(
726                    *latency,
727                    (num_repeats * body_group.borrow().get_latency()),
728                    "latency of static repeat is not equal to num_repeats * latency of body"
729                );
730                // the assignments in the repeat group should simply trigger the
731                // body group. So the static group will literally look like:
732                // static group static_repeat <num_repeats * body_latency> {body[go] = 1'd1;}
733                structure!( builder;
734                    let signal_on = constant(1,1);
735                );
736                let trigger_body = build_assignments!(builder;
737                    body_group["go"] = ? signal_on["out"];
738                );
739                repeat_group.borrow_mut().assignments = trigger_body.to_vec();
740                repeat_group.borrow_mut().attributes = attributes.clone();
741                repeat_group
742            }
743            ir::StaticControl::Empty(_) => unreachable!(
744                "should not call inline_static_control on empty stmt"
745            ),
746            ir::StaticControl::Invoke(inv) => {
747                dbg!(inv.comp.borrow().name());
748                todo!("implement static inlining for invokes")
749            }
750        }
751    }
752}
753
754impl Visitor for StaticInliner {
755    /// Executed after visiting the children of a [ir::Static] node.
756    fn start_static_control(
757        &mut self,
758        s: &mut ir::StaticControl,
759        comp: &mut ir::Component,
760        sigs: &LibrarySignatures,
761        _comps: &[ir::Component],
762    ) -> VisResult {
763        let mut builder = ir::Builder::new(comp, sigs);
764        let replacement_group = self.inline_static_control(s, &mut builder);
765        // the replacement group should inherit the original control's position attributes
766        let mut replacement_ctrl =
767            ir::Control::from(ir::StaticControl::from(replacement_group));
768        // the new control node should carry over the position attributes of the original control node
769        replacement_ctrl
770            .get_mut_attributes()
771            .copy_from_set(s.get_attributes(), vec![SetAttr::Pos]);
772        Ok(Action::Change(Box::new(replacement_ctrl)))
773    }
774}