Commit 4a7a4164 authored by Alessandro Rubini

kill ppi->msg_tmp

There is no need for a "msg_tmp" in the pp-instance. All uses are
really temporary to a function, so we can use local variables instead.

(This is not the same as what was called tmp_header, which was
referenced all over message processing, so it still exists, though
renamed).
Signed-off-by: Alessandro Rubini <rubini@gnudd.com>
parent ada67af9
......@@ -176,12 +176,6 @@ struct pp_instance {
TimeInternal delay_req_receive_time;
Integer8 log_min_delay_req_interval;
union {
MsgSync sync;
MsgFollowUp follow;
MsgDelayResp resp;
MsgAnnounce announce;
} msg_tmp;
UInteger16 sent_seq[__PP_NR_MESSAGES_TYPES]; /* last sent this type */
MsgHeader received_ptp_header;
MsgHeader delay_req_hdr;
......
......@@ -121,6 +121,7 @@ int st_com_slave_handle_announce(struct pp_instance *ppi, unsigned char *buf,
int len)
{
MsgHeader *hdr = &ppi->received_ptp_header;
MsgAnnounce ann;
if (len < PP_ANNOUNCE_LENGTH)
return -1;
......@@ -132,8 +133,8 @@ int st_com_slave_handle_announce(struct pp_instance *ppi, unsigned char *buf,
ppi->record_update = TRUE;
if (ppi->is_from_cur_par) {
msg_unpack_announce(buf, &ppi->msg_tmp.announce);
s1(ppi, hdr, &ppi->msg_tmp.announce);
msg_unpack_announce(buf, &ann);
s1(ppi, hdr, &ann);
} else {
/* st_com_add_foreign takes care of announce unpacking */
st_com_add_foreign(ppi, buf);
......@@ -156,6 +157,7 @@ int st_com_slave_handle_sync(struct pp_instance *ppi, unsigned char *buf,
TimeInternal origin_tstamp;
TimeInternal correction_field;
MsgHeader *hdr = &ppi->received_ptp_header;
MsgSync sync;
if (len < PP_SYNC_LENGTH)
return -1;
......@@ -177,7 +179,7 @@ int st_com_slave_handle_sync(struct pp_instance *ppi, unsigned char *buf,
ppi->last_sync_corr_field.nanoseconds =
correction_field.nanoseconds;
} else {
msg_unpack_sync(buf, &ppi->msg_tmp.sync);
msg_unpack_sync(buf, &sync);
int64_to_TimeInternal(
ppi->received_ptp_header.correctionfield,
&correction_field);
......@@ -187,7 +189,7 @@ int st_com_slave_handle_sync(struct pp_instance *ppi, unsigned char *buf,
ppi->waiting_for_follow = FALSE;
to_TimeInternal(&origin_tstamp,
&ppi->msg_tmp.sync.originTimestamp);
&sync.originTimestamp);
pp_update_offset(ppi, &origin_tstamp,
&ppi->sync_receive_time,
&correction_field);
......@@ -203,6 +205,7 @@ int st_com_slave_handle_followup(struct pp_instance *ppi, unsigned char *buf,
{
TimeInternal precise_orig_timestamp;
TimeInternal correction_field;
MsgFollowUp follow;
int ret = 0;
MsgHeader *hdr = &ppi->received_ptp_header;
......@@ -228,10 +231,10 @@ int st_com_slave_handle_followup(struct pp_instance *ppi, unsigned char *buf,
return 0;
}
msg_unpack_follow_up(buf, &ppi->msg_tmp.follow);
msg_unpack_follow_up(buf, &follow);
ppi->waiting_for_follow = FALSE;
to_TimeInternal(&precise_orig_timestamp,
&ppi->msg_tmp.follow.preciseOriginTimestamp);
&follow.preciseOriginTimestamp);
int64_to_TimeInternal(ppi->received_ptp_header.correctionfield,
&correction_field);
......
......@@ -11,6 +11,7 @@ int pp_slave(struct pp_instance *ppi, unsigned char *pkt, int plen)
int e = 0; /* error var, to check errors in msg handling */
TimeInternal correction_field;
MsgHeader *hdr = &ppi->received_ptp_header;
MsgDelayResp resp;
int d1, d2;
if (ppi->is_new_state) {
......@@ -62,19 +63,19 @@ int pp_slave(struct pp_instance *ppi, unsigned char *pkt, int plen)
if (e)
break;
msg_unpack_delay_resp(pkt, &ppi->msg_tmp.resp);
msg_unpack_delay_resp(pkt, &resp);
if ((memcmp(&DSPOR(ppi)->portIdentity.clockIdentity,
&ppi->msg_tmp.resp.requestingPortIdentity.clockIdentity,
&resp.requestingPortIdentity.clockIdentity,
PP_CLOCK_IDENTITY_LENGTH) == 0) &&
((ppi->sent_seq[PPM_DELAY_REQ]) ==
hdr->sequenceId) &&
(DSPOR(ppi)->portIdentity.portNumber ==
ppi->msg_tmp.resp.requestingPortIdentity.portNumber)
resp.requestingPortIdentity.portNumber)
&& ppi->is_from_cur_par) {
to_TimeInternal(&ppi->delay_req_receive_time,
&ppi->msg_tmp.resp.receiveTimestamp);
&resp.receiveTimestamp);
int64_to_TimeInternal(
hdr->correctionfield,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment