21 std::set<critical_cyclet> &set_of_cycles)
23 for(std::set<critical_cyclet>::const_iterator it=set_of_cycles.begin();
24 it!=set_of_cycles.end(); )
26 std::set<critical_cyclet>::const_iterator next=it;
28 auto e_it=it->begin();
30 for(; e_it!=it->end(); ++e_it)
35 set_of_cycles.erase(*it);
52 std::set<critical_cyclet> &set_of_cycles,
56 for(std::size_t i=0; i<egraph.size(); i++)
59 std::list<event_idt>* order=
nullptr;
66 order=&egraph.po_order;
69 order=&egraph.poUrfe_order;
80 order=order_filtering(order);
85 for(std::list<event_idt>::const_iterator
91 egraph.message.debug() <<
"explore " << egraph[source].id <<
messaget::eom;
104 while(!marked_stack.empty())
113 if(egraph.filter_thin_air)
126 std::stack<event_idt> stack(point_stack);
128 while(!stack.empty())
133 egraph.message.debug() <<
"extract: "
134 << egraph[current_vertex].get_operation()
135 << egraph[current_vertex].variable <<
"@"
136 << egraph[current_vertex].thread <<
"~"
137 << egraph[current_vertex].local
145 if(current_vertex==vertex)
151 if(current_vertex==source)
160 std::set<critical_cyclet> &set_of_cycles,
167 bool has_to_be_unsafe,
172 egraph.message.debug() << std::string(80,
'-');
174 egraph.message.debug() <<
"marked size:" << marked_stack.size()
176 std::stack<event_idt> tmp;
177 while(!point_stack.empty())
179 egraph.message.debug() << point_stack.top() <<
" | ";
180 tmp.push(point_stack.top());
186 point_stack.push(tmp.top());
189 while(!marked_stack.empty())
191 egraph.message.debug() << marked_stack.top() <<
" | ";
192 tmp.push(marked_stack.top());
198 marked_stack.push(tmp.top());
204 if(filtering(vertex))
207 egraph.message.debug() <<
"bcktck "<<egraph[vertex].id<<
"#"<<vertex<<
", "
208 <<egraph[source].id<<
"#"<<source<<
" lw:"<<lwfence_met<<
" unsafe:"
211 bool get_com_only=
false;
212 bool unsafe_met_updated=unsafe_met;
213 bool same_var_pair_updated=same_var_pair;
215 bool not_thin_air=
true;
221 irep_idt avoid_at_the_end=var_to_avoid;
223 if(!avoid_at_the_end.
empty() && avoid_at_the_end == this_vertex.
variable)
231 if(!this_vertex.
local)
237 bool has_to_be_unsafe_updated=
false;
241 if(egraph.ignore_arrays ||
249 if(events_per_thread[this_vertex.
thread]==4)
252 events_per_thread[this_vertex.
thread]++;
260 if(has_to_be_unsafe && point_stack.size() >= 2)
262 const event_idt previous=point_stack.top();
264 const event_idt preprevious=point_stack.top();
265 point_stack.push(previous);
266 if(!egraph[preprevious].unsafe_pair(this_vertex, model) &&
268 egraph[preprevious].operation==
271 egraph[preprevious].operation==
274 egraph[preprevious].operation==
280 has_to_be_unsafe_updated=has_to_be_unsafe;
288 if(!point_stack.empty() &&
289 egraph.are_po_ordered(point_stack.top(), vertex) &&
293 this_vertex.
variable==egraph[point_stack.top()].variable)
297 egraph[point_stack.top()].operation==
300 events_per_thread[this_vertex.
thread]--;
305 same_var_pair_updated=
true;
306 if(events_per_thread[this_vertex.
thread]>=3)
312 if(!point_stack.empty() &&
313 egraph.are_po_ordered(point_stack.top(), vertex) &&
317 this_vertex.
variable!=egraph[point_stack.top()].variable)
319 same_var_pair_updated=
false;
327 const unsigned char nb_writes=writes_per_variable[this_vertex.
variable];
328 const unsigned char nb_reads=reads_per_variable[this_vertex.
variable];
330 if(nb_writes+nb_reads==3)
332 events_per_thread[this_vertex.
thread]--;
339 events_per_thread[this_vertex.
thread]--;
343 writes_per_variable[this_vertex.
variable]++;
349 events_per_thread[this_vertex.
thread]--;
353 reads_per_variable[this_vertex.
variable]++;
357 if(!point_stack.empty())
363 egraph.map_data_dp[this_vertex.
thread].dp(prev_vertex, this_vertex));
364 if(unsafe_met_updated &&
366 egraph.are_po_ordered(point_stack.top(), vertex))
367 has_to_be_unsafe_updated=
true;
370 point_stack.push(vertex);
372 marked_stack.push(vertex);
377 for(wmm_grapht::edgest::const_iterator
378 w_it=egraph.po_out(vertex).begin();
379 w_it!=egraph.po_out(vertex).end(); w_it++)
382 if(w==source && point_stack.size()>=4
383 && (unsafe_met_updated
384 || this_vertex.
unsafe_pair(egraph[source], model)) )
391 e_it!=new_cycle.
end();
393 thin_air_events.insert(*e_it);
396 not_thin_air && new_cycle.
is_cycle() &&
399 egraph.message.debug() << new_cycle.
print_name(model,
false)
401 set_of_cycles.insert(new_cycle);
404 set_of_cycles.insert(*reduced);
418 same_var_pair_updated,
420 has_to_be_unsafe_updated,
421 avoid_at_the_end, model);
428 std::set<event_idt> edges_to_remove;
430 for(wmm_grapht::edgest::const_iterator
431 w_it=egraph.com_out(vertex).begin();
432 w_it!=egraph.com_out(vertex).end(); w_it++)
436 edges_to_remove.insert(w);
437 else if(w==source && point_stack.size()>=4 &&
438 (unsafe_met_updated ||
446 e_it!=new_cycle.
end();
448 thin_air_events.insert(*e_it);
451 not_thin_air && new_cycle.
is_cycle() &&
454 egraph.message.debug() << new_cycle.
print_name(model,
false)
456 set_of_cycles.insert(new_cycle);
459 set_of_cycles.insert(*reduced);
480 egraph.remove_com_edge(vertex, e);
485 while(!marked_stack.empty() && marked_stack.top()!=vertex)
492 if(!marked_stack.empty())
506 writes_per_variable[this_vertex.
variable]--;
508 reads_per_variable[this_vertex.
variable]--;
510 events_per_thread[this_vertex.
thread]--;
518 if(skip_tracked.find(vertex)==skip_tracked.end())
519 if(not_thin_air && !get_com_only &&
520 (po_trans > 1 || po_trans==0) &&
521 !point_stack.empty() &&
522 egraph.are_po_ordered(point_stack.top(), vertex) &&
525 egraph[point_stack.top()].operation==
529 egraph[point_stack.top()].operation==
532 skip_tracked.insert(vertex);
534 std::stack<event_idt> tmp;
536 while(!marked_stack.empty() && marked_stack.top()!=vertex)
538 tmp.push(marked_stack.top());
539 mark[marked_stack.top()]=
false;
543 if(!marked_stack.empty())
552 marked_stack.push(tmp.top());
553 mark[tmp.top()]=
true;
557 marked_stack.push(vertex);
560 if(!egraph[point_stack.top()].unsafe_pair(this_vertex, model))
563 if(egraph.ignore_arrays ||
565 avoid_at_the_end=this_vertex.
variable;
572 egraph[point_stack.top()].operation==
576 egraph[point_stack.top()].operation==
579 for(wmm_grapht::edgest::const_iterator w_it=
580 egraph.po_out(vertex).begin();
581 w_it!=egraph.po_out(vertex).end(); w_it++)
590 (po_trans==0?0:po_trans-1),
600 while(!marked_stack.empty() && marked_stack.top()!=vertex)
607 if(!marked_stack.empty())
612 skip_tracked.erase(vertex);
bool unsafe_pair(const abstract_eventt &next, memory_modelt model) const
dstringt has one field: an unsigned integer, `no`, which is an index into a static table of strings.
void hide_internals(critical_cyclet &reduced) const
data_typet::const_iterator const_iterator
bool is_unsafe(memory_modelt model, bool fast=false)
checks whether there is at least one pair which is unsafe (takes fences and dependencies into account...
bool is_not_uniproc() const
std::string print_name(const critical_cyclet &reduced, memory_modelt model) const
bool is_not_thin_air() const
bool has_user_defined_fence
std::set< event_idt > thin_air_events
critical_cyclet extract_cycle(event_idt vertex, event_idt source, unsigned number_of_cycles)
extracts a (whole, unreduced) cycle from the stack.
void collect_cycles(std::set< critical_cyclet > &set_of_cycles, memory_modelt model)
Tarjan's 1972 depth-first cycle-enumeration algorithm, adapted and modified for events.
bool backtrack(std::set< critical_cyclet > &set_of_cycles, event_idt source, event_idt vertex, bool unsafe_met, event_idt po_trans, bool same_var_pair, bool lwsync_met, bool has_to_be_unsafe, irep_idt var_to_avoid, memory_modelt model)
see event_grapht::collect_cycles
void filter_thin_air(std::set< critical_cyclet > &set_of_cycles)
After the collection, eliminates the executions forbidden by an indirect out-of-thin-air dependency.
wmm_grapht::node_indext event_idt
bool is_lwfence(const goto_programt::instructiont &instruction, const namespacet &ns)
const std::string & id2string(const irep_idt &d)
#define UNREACHABLE
This should be used to mark dead code.
#define DATA_INVARIANT(CONDITION, REASON)
This condition should be used to document that assumptions that are made on goto_functions,...