Commit 818bbf47 authored by Tristan Gingold

libwrtd: refactoring, fix thinko in rules collision.

parent 2b48ab77
@@ -736,7 +736,7 @@ enum wrtd_status wrtd_alloc_rules(struct wrtd_dev *wrtd,
                 unsigned idx;
                 for (idx = 0; idx < nbr_rules; idx++) {
                         rules[n].cpu = cpu;
-                        rules[n].idx = idx;
+                        rules[n].local_idx = idx;
                         n++;
                 }
         }
@@ -767,13 +767,15 @@ enum wrtd_status wrtd_fill_rules(struct wrtd_dev *wrtd)
         status = wrtd_alloc_rules(wrtd, &wrtd->rules);
         WRTD_RETURN_IF_ERROR(status);
 
+        /* Read rules from cpus. */
         for (i = 0; i < wrtd->nbr_rules; i++) {
                 unsigned cpu = wrtd->rules[i].cpu;
+                unsigned int addr;
+
+                addr = (wrtd->roots[cpu].rules_addr
+                        + wrtd->rules[i].local_idx * sizeof(struct wrtd_rule));
                 status = wrtd_msg_readw
-                        (wrtd, cpu,
-                         (wrtd->roots[cpu].rules_addr
-                          + wrtd->rules[i].idx * sizeof(struct wrtd_rule)),
-                         sizeof(struct wrtd_rule) / 4,
+                        (wrtd, cpu, addr, sizeof(struct wrtd_rule) / 4,
                          (uint32_t *)&wrtd->rules[i].rule);
                 if (status != WRTD_SUCCESS) {
                         free(wrtd->rules);
@@ -793,7 +795,7 @@ enum wrtd_status wrtd_write_rule(struct wrtd_dev *wrtd, unsigned idx)
         status = wrtd_msg_writew
                 (wrtd, rule->cpu,
                  (wrtd->roots[rule->cpu].rules_addr
-                  + rule->idx * sizeof(struct wrtd_rule)),
+                  + rule->local_idx * sizeof(struct wrtd_rule)),
                  sizeof(struct wrtd_rule) / 4,
                  (uint32_t *)&rule->rule);
...
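Note: the refactoring in wrtd_fill_rules() simply hoists the rule address computation into a local addr variable. A minimal standalone sketch of that formula, assuming a placeholder struct wrtd_rule and an illustrative rules_addr base (neither taken from the firmware):

#include <stdio.h>
#include <stdint.h>

/* Placeholder for the firmware rule record; only its size matters here. */
struct wrtd_rule { uint32_t words[16]; };

struct lib_rule { unsigned cpu; unsigned local_idx; };

/* Same formula as the new 'addr' variable in wrtd_fill_rules(). */
static uint32_t rule_addr(uint32_t rules_addr, unsigned local_idx)
{
        return rules_addr + local_idx * (uint32_t)sizeof(struct wrtd_rule);
}

int main(void)
{
        struct lib_rule r = { .cpu = 1, .local_idx = 3 };
        uint32_t base = 0x4000;        /* assumed rules_addr of cpu 1 */

        printf("cpu#%u rule %u at 0x%x (%u words)\n",
               r.cpu, r.local_idx, (unsigned)rule_addr(base, r.local_idx),
               (unsigned)(sizeof(struct wrtd_rule) / 4));
        return 0;
}

Running it prints the address 0x40c0 for rule 3 of cpu 1: the base plus three rule-sized strides, transferred as 16 32-bit words.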
@@ -24,10 +24,10 @@ struct wrtd_lib_alarm {
 };
 
 struct wrtd_lib_rule {
-        /* CPU on which the rule is. */
+        /* MT CPU on which the rule is placed. */
         unsigned int cpu;
-        /* Rule index for the cpu. */
-        unsigned int idx;
+        /* Rule index on the local MT cpu. */
+        unsigned int local_idx;
         /* Rule. */
         struct wrtd_rule rule;
 };
...
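Note: after the rename, local_idx is the rule's position inside its own CPU's rule table, while the reconfiguration map below uses global_idx for the position in the flattened all-CPUs array. A small sketch of the relation, assuming the layout built by wrtd_alloc_rules() (all rules of cpu 0, then cpu 1, and so on); the per-CPU counts and the helper name flat_index are made up:

#include <assert.h>
#include <stdio.h>

/* Assumed per-CPU rule counts (wrtd->roots[cpu].nbr_rules). */
static const unsigned nbr_rules_per_cpu[] = { 4, 8, 8 };

/* Position of (cpu, local_idx) in the flat rules array, given the layout
 * produced by wrtd_alloc_rules(): cpu 0 first, then cpu 1, ... */
static unsigned flat_index(unsigned cpu, unsigned local_idx)
{
        unsigned off = 0, c;

        for (c = 0; c < cpu; c++)
                off += nbr_rules_per_cpu[c];
        assert(local_idx < nbr_rules_per_cpu[cpu]);
        return off + local_idx;
}

int main(void)
{
        /* Rule 2 of cpu 1 comes after the 4 rules of cpu 0: index 6. */
        printf("flat index: %u\n", flat_index(1, 2));
        return 0;
}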
@@ -12,7 +12,7 @@
 struct rule_map {
         struct wrtd_lib_rule *rule;
         int cpu;
-        int idx;
+        int global_idx;
 };
 
 struct device_map {
@@ -75,7 +75,7 @@ static enum wrtd_status wrtd_reconfigure_alloc_map(struct wrtd_dev *wrtd,
                         continue;
                 map->rules[n].rule = &wrtd->rules[i];
                 map->rules[n].cpu = -1;
-                map->rules[n].idx = -1;
+                map->rules[n].global_idx = -1;
                 n++;
         }
         assert (n == map->nbr_rules);
@@ -164,31 +164,36 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
                 if (!rule->conf.enabled)
                         continue;
 
-                /* Find reserved input channel. */
+                /* Place the rule on the same CPU as the input source. */
                 status = wrtd_find_channel(wrtd, map, rule->conf.source_id,
                                            wrtd_input, &dev, &ch);
                 if (status == WRTD_SUCCESS) {
+                        /* Input channel. */
                         cpu_affinity = map->devs[dev].cpu;
                 }
                 else {
                         status = wrtd_find_alarm(
                                 wrtd, rule->conf.source_id, &ch);
                         if (status == WRTD_SUCCESS) {
+                                /* Alarm. */
                                 cpu_affinity = wrtd->alarms[ch].cpu;
                         }
                         else {
+                                /* Network message. */
                                 cpu_affinity = wrtd->config->rx_cpu;
                         }
                 }
 
-                /* Find reserved output channel. */
+                /* Set destination cpu and channel. */
                 status = wrtd_find_channel(wrtd, map, rule->conf.dest_id,
                                            wrtd_output, &dev, &ch);
                 if (status == WRTD_SUCCESS) {
+                        /* Output device. */
                         rule->conf.dest_cpu = map->devs[dev].cpu;
                         rule->conf.dest_ch = ch;
                 }
                 else {
+                        /* Network. */
                         rule->conf.dest_cpu = wrtd->config->tx_cpu;
                         rule->conf.dest_ch = WRTD_DEST_CH_NET;
                 }
@@ -204,7 +209,8 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
         cpu_offset = 0;
         for(cpu = 0; cpu < wrtd->nbr_cpus; cpu++) {
                 const unsigned nbr_cpu_rules = wrtd->roots[cpu].nbr_rules;
-                /* SLOTS[N] contains the index of the rule in map. */
+                /* SLOTS[N] contains the index of the rule in map, or -1
+                   if the slot is free. */
                 int *slots;
                 int free_slot_idx;
 
@@ -216,7 +222,7 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
                                 "cannot allocate slots for %u rules",
                                 nbr_cpu_rules);
 
-                /* Marks slot as unused. */
+                /* Mark all slots as unused. */
                 for (i = 0; i < nbr_cpu_rules; i++)
                         slots[i] = -1;
 
@@ -230,7 +236,7 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
 
                         hash = wrtd_id_hash(m->rule->rule.conf.source_id)
                                 % nbr_cpu_rules;
-                        m->idx = cpu_offset + hash;
+                        m->global_idx = cpu_offset + hash;
                         m->rule->rule.conf.hash_chain = -1;
                         if (slots[hash] == -1)
                                 slots[hash] = i;
@@ -241,14 +247,16 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
                 for (i = 0; i < map->nbr_rules; i++) {
                         struct rule_map *m = &map->rules[i];
                         unsigned head_idx;
+                        unsigned head_rule;
 
                         if (m->cpu != cpu)
                                 continue;
-                        head_idx = m->idx - cpu_offset;
+                        head_idx = m->global_idx - cpu_offset;
                         assert(head_idx < nbr_cpu_rules);
+                        head_rule = slots[head_idx];
 
                         /* Skip it if already placed. */
-                        if (slots[head_idx] == i)
+                        if (head_rule == i)
                                 continue;
 
                         /* Find next free slot (there must be one). */
@@ -256,13 +264,13 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
                                 free_slot_idx++;
                                 assert(free_slot_idx < nbr_cpu_rules);
                         }
-                        m->idx = cpu_offset + free_slot_idx;
+                        m->global_idx = cpu_offset + free_slot_idx;
                         slots[free_slot_idx] = i;
 
                         /* Chain. */
                         m->rule->rule.conf.hash_chain =
-                                map->rules[head_idx].rule->rule.conf.hash_chain;
-                        map->rules[head_idx].rule->rule.conf.hash_chain =
+                                map->rules[head_rule].rule->rule.conf.hash_chain;
+                        map->rules[head_rule].rule->rule.conf.hash_chain =
                                 free_slot_idx;
                         free_slot_idx++;
@@ -283,7 +291,7 @@ static enum wrtd_status wrtd_reconfigure_place(struct wrtd_dev *wrtd,
                                 assert(free_slot_idx < nbr_cpu_rules);
                         }
-                        map->rules[disable_idx].idx =
+                        map->rules[disable_idx].global_idx =
                                 cpu_offset + free_slot_idx;
                         free_slot_idx++;
                         disable_idx++;
@@ -308,7 +316,8 @@ static enum wrtd_status wrtd_reconfigure_write(struct wrtd_dev *wrtd,
         /* Copy rules. */
         for(i = 0; i < map->nbr_rules; i++)
-                new_rules[map->rules[i].idx].rule = map->rules[i].rule->rule;
+                new_rules[map->rules[i].global_idx].rule =
+                        map->rules[i].rule->rule;
 
         /* Freeze all cpus. */
         /* TODO. */
@@ -361,6 +370,7 @@ enum wrtd_status wrtd_reconfigure(struct wrtd_dev *wrtd)
         status = wrtd_reconfigure_alloc_map(wrtd, &map);
         WRTD_RETURN_IF_ERROR(status);
 
+        /* Place rules. */
         status = wrtd_reconfigure_place(wrtd, &map);
         if (status == WRTD_SUCCESS)
...
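Note: the thinko fixed by this commit is in the collision handling. slots[] maps a hash slot to the rule currently occupying it, so a displaced rule must be chained behind map->rules[slots[head_idx]] (now cached as head_rule), not behind map->rules[head_idx]. A self-contained sketch of the corrected two-pass placement, with the structures cut down to what the chaining needs; struct rule, place() and NSLOTS are invented names, and only the logic mirrors wrtd_reconfigure_place():

#include <assert.h>
#include <stdio.h>

#define NSLOTS 8

struct rule { unsigned source_hash; int hash_chain; int slot; };

static void place(struct rule *rules, unsigned n)
{
        int slots[NSLOTS];
        unsigned i;
        int free_slot_idx = 0;

        /* Mark all slots as unused. */
        for (i = 0; i < NSLOTS; i++)
                slots[i] = -1;

        /* First pass: every rule claims its hash slot; the first one wins. */
        for (i = 0; i < n; i++) {
                unsigned hash = rules[i].source_hash % NSLOTS;

                rules[i].slot = hash;
                rules[i].hash_chain = -1;
                if (slots[hash] == -1)
                        slots[hash] = i;
        }

        /* Second pass: displaced rules move to a free slot and are chained
         * to the rule that owns the head slot.  The fixed thinko: the chain
         * head is slots[head_idx] (a rule number), not head_idx itself
         * (a slot number). */
        for (i = 0; i < n; i++) {
                unsigned head_idx = rules[i].slot;
                int head_rule = slots[head_idx];

                if (head_rule == (int)i)
                        continue;        /* already placed */

                /* Find next free slot (there must be one). */
                while (slots[free_slot_idx] != -1) {
                        free_slot_idx++;
                        assert(free_slot_idx < NSLOTS);
                }
                rules[i].slot = free_slot_idx;
                slots[free_slot_idx] = i;

                /* Link the new slot right after the chain head. */
                rules[i].hash_chain = rules[head_rule].hash_chain;
                rules[head_rule].hash_chain = free_slot_idx;
                free_slot_idx++;
        }
}

int main(void)
{
        struct rule rules[3] = { { 5, 0, 0 }, { 13, 0, 0 }, { 21, 0, 0 } };
        unsigned i;

        place(rules, 3);        /* all three hash to slot 5 */
        for (i = 0; i < 3; i++)
                printf("rule %u -> slot %d, chain %d\n",
                       i, rules[i].slot, rules[i].hash_chain);
        return 0;
}

With the three sample rules all hashing to slot 5, rule 0 keeps the head slot and rules 1 and 2 are linked behind it through hash_chain, so a lookup starting at slot 5 still reaches every colliding rule.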
@@ -333,7 +333,7 @@ static enum wrtd_status wrtd_cmd_dump_rules(struct wrtd_dev *wrtd,
                 struct wrtd_lib_rule *r = &wrtd->rules[i];
                 if (wrtd_id_null(r->rule.conf.id))
                         continue;
-                printf("cpu#%u rule %u:\n", r->cpu, r->idx);
+                printf("cpu#%u rule %u:\n", r->cpu, r->local_idx);
                 disp_rule_conf(&r->rule, wrtd->roots[r->cpu].nbr_rules);
                 disp_rule_stat(&r->rule);
         }
...