Model implementation

Published

April 16, 2024

Modified

April 15, 2024

The model used for the simulations translates the binary outcome into a binomial outcome by aggregating over the unique covariate combinations. For example, with the linear predictor:

# data generation process
formals(roadmap.data::get_trial_data)$g
function(d, sim_spec) {
    a0 <- sim_spec$a0
    m <- sim_spec$m
    b <- sim_spec$b
    eta <- a0 + m["l1"] * d$l1 + m["l2"] * d$l2 + (b["erx"] + 
        b["erx-r1"] * d$srp1 + b["erx-r2"] * d$srp2) * d$erx + 
        (b["r1"] * d$srp1 + b["r2"] * d$srp2) * d$r * d$er + 
        (b["r1d"] * d$d * d$srp1 + b["r2d"] * d$d * d$srp2) * 
            d$ed + b["efx"] * d$efx + b["f"] * d$f * d$ef
    eta
}
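Written out, the linear predictor computed by this function is:

\[
\eta = a_0 + m_{l1}\, l_1 + m_{l2}\, l_2 + \left(b_{erx} + b_{erx\text{-}r1}\, srp_1 + b_{erx\text{-}r2}\, srp_2\right) erx + \left(b_{r1}\, srp_1 + b_{r2}\, srp_2\right) r\, er + \left(b_{r1d}\, srp_1 + b_{r2d}\, srp_2\right) d\, ed + b_{efx}\, efx + b_{f}\, f\, ef
\]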

Simulating data and aggregating over the unique covariate combinations, I get:

# simulate data
set.seed(1)
sim_spec <- roadmap.data::get_sim_spec()

unlist(sim_spec)
         a0        m.l1        m.l2       b.erx    b.erx-r1    b.erx-r2 
 0.61903921 -0.33718806 -0.08682239 -0.10000000 -0.05000000  0.05000000 
       b.r1        b.r2       b.r1d       b.r2d       b.efx         b.f 
 0.20000000  0.09000000  0.30000000  0.10000000  0.25000000  0.15000000 
ll <- roadmap.data::get_trial_data(N = 2500, sim_spec = sim_spec)
d <- copy(ll$d)
head(d[, .(y = sum(y), n = .N), keyby = .(l, er, ed, ef, r, srp0, srp1, srp2, d, f)])
   l er ed ef r srp0 srp1 srp2 d f   y   n
1: 0  0  0  0 0    1    0    0 0 0 167 247
2: 0  0  0  1 0    1    0    0 0 0 112 193
3: 0  0  0  1 0    1    0    0 0 1 116 197
4: 0  0  1  0 0    0    1    0 0 0  12  15
5: 0  0  1  0 0    0    1    0 1 0  11  12
6: 0  0  1  1 0    0    1    0 0 0   5   7

The columns correspond to:

- l: silo (0 = early, 1 = late, 2 = chronic stage infection)
- er, ed, ef: indicators of being revealed to the surgery, duration and choice domains respectively
- r: revision (vs dair, the reference) in the surgery domain
- srp0, srp1, srp2: surgery received (dair, one-stage revision, two-stage revision)
- d: duration allocation within the duration domain (coding described below)
- f: antibiotic choice (rif vs no-rif, the reference)
- y, n: number of events and number of participants for each covariate combination

The duration domain depends on what surgery was received, NOT what was originally planned/assigned. For one-stage revision, 12 weeks is the reference group (indicated by 0) vs 6 weeks (1). For two-stage revision, 7 days is the reference group (indicated by 0) vs 12 weeks (1).
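The following small table just restates the duration coding above (a minimal sketch; the labels are illustrative, not fields from roadmap.data):

# illustrative only: restates the duration coding described in the text
data.table::data.table(
  surgery  = c("one-stage", "one-stage", "two-stage", "two-stage"),
  d        = c(0, 1, 0, 1),
  duration = c("12 weeks (ref)", "6 weeks", "7 days (ref)", "12 weeks")
)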

Within early, late and chronic stage infection, the percentages (by silo, l) receiving each surgery type (srp) as a randomised or non-randomised treatment (er) are:

Percent receiving each surgery type within silo
ll <- roadmap.data::get_trial_data(N = 1e6, sim_spec = sim_spec)
d_tbl <- ll$d[, .(n = .N), keyby = .(l, er, srp)]
d_tbl[, N_l := sum(n), keyby = l]
d_tbl[, pct := 100 * n / N_l]
d_tbl[, N_l := NULL]
d_tbl
    l er srp      n        pct
 1: 0  0   0 270730 90.0593121
 2: 0  0   1  29883  9.9406879
 3: 1  0   0   2028  0.4059987
 4: 1  0   1   2332  0.4668585
 5: 1  0   2   5717  1.1445239
 6: 1  1   0 244552 48.9584772
 7: 1  1   1  73803 14.7751092
 8: 1  1   2 171077 34.2490325
 9: 2  0   0  39954 19.9891934
10: 2  0   1  40146 20.0852520
11: 2  0   2 119778 59.9255546

Stan implementation

The model specification can be translated into Stan, albeit using a different likelihood specification (binomial rather than Bernoulli):



data {
  int N;
  array[N] int y;
  array[N] int n;
  // variation due to silo/joint
  vector[N] l1;
  vector[N] l2;
  // reveal
  vector[N] er;
  vector[N] ed;
  vector[N] ef;
  // surgery
  vector[N] r;
  // duration
  vector[N] d;
  // vector[N] rp; // revision was recvd
  vector[N] srp0; // dair recvd
  vector[N] srp1; // one-stage revision recvd
  vector[N] srp2; // two-stage revision recvd
  // choice
  vector[N] f;
  
  vector[2] pri_m_sd;
  vector[9] pri_b_sd;
  int prior_only;
}
transformed data{
  vector[N] erx;
  vector[N] erx_srp1;
  vector[N] erx_srp2;
  vector[N] er_r;
  vector[N] er_r_srp1;
  vector[N] er_r_srp2;
  vector[N] ed_d_srp1;
  vector[N] ed_d_srp2;
  vector[N] ef_f;
  
  erx = (1-er) ;
  erx_srp1 = (1-er) .* srp1;
  erx_srp2 = (1-er) .* srp2;
  
  er_r = er .* r;
  
  er_r_srp1 = er_r .* srp1;
  er_r_srp2 = er_r .* srp2;

// rp can be removed: srp1 indicates that one-stage occurred and srp2 that two-stage occurred,
// so rp becomes redundant.
  ed_d_srp1 = ed .* d .* srp1;
  ed_d_srp2 = ed .* d .* srp2;

  ef_f = ef .* f;
}
parameters {
  real a0;
  vector[2] m;
  vector[9] b;
}
transformed parameters{
  vector[N] eta;
 
  eta = a0 + 
      m[1]*l1 + m[2]*l2 +  
      (b[1]*erx + b[2]*erx_srp1 + b[3]*erx_srp2)  +
      (b[4]*er_r_srp1 + b[5]*er_r_srp2) +
      // b[6] * (1 - ed) + 
      (b[6]*ed_d_srp1 + b[7]*ed_d_srp2)  + 
      b[8] * (1 - ef) + (b[9]*ef_f);
      
}
model{
  target += logistic_lpdf(a0 | 0, 1);
  target += normal_lpdf(m | 0, pri_m_sd);
  target += normal_lpdf(b | 0, pri_b_sd);
  
  // likelihood chunks pertaining to each silo
  if(!prior_only){
    target += binomial_logit_lpmf(y | n, eta) ;      
  }
  
  
}
generated quantities{

  // vector[N] eta_r_0;
  // vector[N] eta_r_1;
  // vector[N] eta_d_0;
  // vector[N] eta_d_1;
  // vector[N] eta_f_0;
  // vector[N] eta_f_1;
  // 
  // {
  // 
  //   // predictions on log-odds scale setting all participants to the level
  //   // of interest, e.g. assume all have r = 0
  //   eta_r_0   =  a0 +
  //     m[1]*l1   + m[2]*l2 +
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     // b[6] * (1 - ed  ) + 
  //     (b[6]*ed_d_srp1   + b[7]*ed_d_srp2)   +
  //     b[8] * (1 - ef  ) + (b[9]*ef_f)  ;
  //   // r = 1
  //   // (only those revealed to surgery domain contribute due to the er term)
  //   eta_r_1   =  a0 +
  //     m[1]*l1   + m[2]*l2 +
  //     // note the use of er here and not er_r, i.e. the r is set to 1 for
  //     // everyone
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     (b[4]*er .* srp1 + b[5]*er .* srp2) +
  //     // b[6] * (1 - ed  ) + 
  //     (b[6]*ed_d_srp1   + b[7]*ed_d_srp2)   +
  //     b[8] * (1 - ef  ) + (b[9]*ef_f);
  // 
  //   // duration, d = 0
  //   eta_d_0   =  a0 +
  //     m[1]*l1 + m[2]*l2 +
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     (b[4]*er_r_srp1 + b[5]*er_r_srp2) +
  //     // b[6] * (1 - ed) +
  //     b[8] * (1 - ef) + (b[9]*ef_f);
  //   // d = 1
  //   eta_d_1   =  a0 +
  //     m[1]*l1 + m[2]*l2 +
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     (b[4]*er_r_srp1 + b[5]*er_r_srp2) +
  //     // note the use of ed and rp here and not ed_rp_d_srp1, i.e. the d is
  //     // set to 1 for everyone
  //     // b[6] * (1 - ed) + 
  //     (b[6]*ed .* srp1 + b[7]*ed .* srp2)  +
  //     b[8] * (1 - ef) + (b[9]*ef_f);
  // 
  //   // choice, f = 0
  //   eta_f_0   =  a0 +
  //     m[1]*l1 + m[2]*l2 +
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     (b[4]*er_r_srp1 + b[5]*er_r_srp2) +
  //     // b[6] * (1 - ed) + 
  //     (b[6]*ed_d_srp1 + b[7]*ed_d_srp2)  +
  //     b[8] * (1 - ef)  ;
  //   // f = 1
  //   eta_f_1   =  a0 +
  //     m[1]*l1 + m[2]*l2 +
  //     (b[1] * erx_srp0 + b[2] * erx_srp1 + b[3] * erx_srp2) +
  //     (b[4]*er_r_srp1 + b[5]*er_r_srp2) +
  //     // b[6] * (1 - ed) + 
  //     (b[6]*ed_d_srp1 + b[7]*ed_d_srp2)  +
  //     // note the use of ef here and not ef_f
  //     b[8] * (1 - ef) + (b[9]*ef);
  // 
  // }

}

The model is fitted to a large dataset and the posterior summarised to determine whether the parameter estimates approximate the known values.

Note

There are some terms in the model that, under an ‘ideal’ situation (i.e. where all patients are revealed as initially intended), may cause estimation problems. These issues can manifest as sluggish MCMC or unexpected NA terms appearing in the regression results. In general, an NA in the regression results from lm means that the coefficient is not estimable. This can happen due to exact collinearity, e.g. when Q3 = a Q1 + b Q2 + c for some a, b and c, but it can also happen due to not having enough observations to estimate the relevant parameters (e.g. if p >> n). If your predictors are categorical and you’re adding interaction terms, an NA can also mean that there are no observations with that combination of factor levels.
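As a minimal illustration (toy data, not the trial data), an exactly collinear predictor is aliased and glm reports its coefficient as NA:

set.seed(1)
n  <- 100
q1 <- rnorm(n)
q2 <- rnorm(n)
q3 <- 2 * q1 - 0.5 * q2 + 1            # exact linear combination of q1 and q2
y  <- rbinom(n, 1, plogis(0.2 + 0.3 * q1))
# q3 is aliased given q1, q2 and the intercept, so its coefficient is NA
coef(glm(y ~ q1 + q2 + q3, family = binomial()))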

Code
m4 <- cmdstanr::cmdstan_model("stan/model-sim-04.stan")

set.seed(1)

sim_spec <- roadmap.data::get_sim_spec()
sim_spec$a0  <- qlogis(0.65)
sim_spec$m["l1"] <- 0.57
sim_spec$m["l2"] <- 0.64
sim_spec$b["erx"] <- -0.1
sim_spec$b["erx-r1"] <- -0.05
sim_spec$b["erx-r2"] <- 0.05
sim_spec$b["r1"] <- -0.6931472
sim_spec$b["r2"] <- -0.6931472
# sim_spec$b["edx"] <- -0.07
sim_spec$b["r1d"] <- -0.6931472
sim_spec$b["r2d"] <- -0.6931472
sim_spec$b["efx"] <- -0.2
sim_spec$b["f"] <- -0.6931472


ll <- roadmap.data::get_trial_data(N = 2e6, sim_spec = sim_spec)
lsd <- roadmap.data::get_stan_data(ll$d)

d <- copy(ll$d)
d_s <- copy(lsd$d_s)
ld <- lsd$ld


# ld$pri_m_sd <- rep(1, 2)
# ld$pri_b_sd <- rep(1, 9)

f1 <- m4$sample(
  ld, iter_warmup = 1000, iter_sampling = 2000,
  parallel_chains = 2, chains = 2, refresh = 0, show_exceptions = F,
  max_treedepth = 13)
Running MCMC with 2 parallel chains...

Chain 1 finished in 1.4 seconds.
Chain 2 finished in 1.5 seconds.

Both chains finished successfully.
Mean chain execution time: 1.5 seconds.
Total execution time: 1.6 seconds.
Code
# See note above. edx is dropped from the model as it is collinear with
# srp1 and srp2 and leads to an estimation issue.

f2 <- glm(y ~ l1 + l2 +
            erx + erx:srp1 + erx:srp2 +
            er:r:srp1 + er:r:srp2 +
            # edx +
            ed:d:srp1 + ed:d:srp2 +
            efx + ef:f,  
          data = d, family = binomial())

# summary(f2)

# dtmp <- data.table(model.matrix(f2)) 
# dtmp <- cbind(model.matrix(f2), tot = 0)
# pracma::rref(dtmp)
# d_s

# dtmp <- data.table(model.matrix(f2))
# 
# rref(dtmp)
# dtmp.mean <- apply(dtmp, 2, mean)
# dtmp <- sweep(dtmp, 2, dtmp.mean)
# 
# f3 <- prcomp(dtmp)
# print(f3)
# 
# # Fairly unsafe - checking for full rank on removal of each var
# linearly_dep_cols(f2)

The parameter estimates align reasonably well with the simulation parameters used in the linear predictor:

Code
post1 <- data.table(f1$draws(variables = c(c("a0", "m", "b")), format = "matrix"))
cf <- coef(f2)

round(rbind(
  "Simulation parameters" = c(sim_spec$a0, sim_spec$m, sim_spec$b),
  "Posterior means" = colMeans(post1),
  "Max likelihood" = c(
    cf[1:3], 
    cf["erx"], cf["erx:srp1"], cf["erx:srp2"], 
    cf["srp1:er:r"], cf["srp2:er:r"] , 
    # cf["edx"],
    cf["srp1:ed:d"], cf["srp2:ed:d"],
    cf["efx"], cf["ef:f"]
  )
), 4)
                                 l1     l2     erx  erx-r1 erx-r2      r1
Simulation parameters 0.6190 0.5700 0.6400 -0.1000 -0.0500 0.0500 -0.6931
Posterior means       0.6201 0.5651 0.6365 -0.1042 -0.0585 0.0664 -0.6996
Max likelihood        0.6203 0.5649 0.6365 -0.1045 -0.0585 0.0664 -0.6995
                           r2     r1d     r2d    efx       f
Simulation parameters -0.6931 -0.6931 -0.6931 -0.200 -0.6931
Posterior means       -0.6927 -0.6844 -0.7004 -0.197 -0.6918
Max likelihood        -0.6927 -0.6845 -0.7004 -0.197 -0.6918

Estimation - G-computation

This section is now incomplete due to the revised model specification.

Note

Calculations based on the Bayesian model are complicated by the use of a binomial rather than a Bernoulli likelihood. To deal with this, we weight the log-odds contributions by the number of trials associated with each unique covariate combination.
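A minimal sketch of the weighting with toy numbers (not trial data):

# each unique covariate combination contributes its log-odds difference,
# weighted by the number of trials (n) observed for that combination
eta_delta <- c(-0.7, -0.6, -0.8)   # per-group differences in the linear predictor
n_grp     <- c(150, 40, 10)        # trials per group
sum(eta_delta * n_grp) / sum(n_grp)
# equivalently, replicate each group's contribution n times and average,
# which is what the rep(..., times = ld$n) calls below do
mean(rep(eta_delta, times = n_grp))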

Predictions on log-odds scale:

eta_r_0 <- f1$draws(variables = c("eta_r_0"), format = "matrix")
eta_r_1 <- f1$draws(variables = c("eta_r_1"), format = "matrix")
eta_d_0 <- f1$draws(variables = c("eta_d_0"), format = "matrix")
eta_d_1 <- f1$draws(variables = c("eta_d_1"), format = "matrix")
eta_f_0 <- f1$draws(variables = c("eta_f_0"), format = "matrix")
eta_f_1 <- f1$draws(variables = c("eta_f_1"), format = "matrix")

The G-formula allows you to go from a conditional estimate to a marginal one via standardisation or other means.
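In the weighted form used in the code below, the marginal (standardised) log-odds contrast for, say, the surgery domain is

\[
\Delta_r = \frac{\sum_i n_i \left\{ \eta_i(r{=}1) - \eta_i(r{=}0) \right\}}{\sum_i n_i}
\]

where the sum runs over the unique covariate combinations (the rows of d_s) and \(n_i\) is the number of participants with combination \(i\); restricting the sums to the combinations with er = 1 gives the contrast among those revealed to the surgery domain.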

# g-comp - calculations are complicated by virtue of use of binomial
# instead of bernoulli model. Approach is to weight the log-odds contributions
# by the number of trials associated with each unique combination.

max_rows <- 1000

# Compute the effect of revision
b_r <- do.call(rbind, mclapply(1:(min(nrow(eta_r_0), max_rows)), function(ii){
  
  # columns in eta_r_0 correspond to the covariate groups, i.e. the rows in d_s
  
  # The repetition gives the right contribution (weight) for each covariate 
  # combination i.e. each column (covariate combination) is replicated by 
  # the number of pts with this covariate combination. 
  
  # This first one is averaged across both non-reveal and revealed. It is not
  # what we want
  lo_0 <- eta_r_0[ii, rep(1:nrow(d_s), times = ld$n)]
  lo_1 <- eta_r_1[ii, rep(1:nrow(d_s), times = ld$n)]
  mu_r = mean(lo_1 - lo_0)
  
  # Should be no effect of rev in those that were not rand to surg
  # idx <- d_s[er == 0 , which = T]
  # lo_0_erx <- eta_r_0[ii, rep(idx, times = ld$n[idx])]
  # lo_1_erx <- eta_r_1[ii, rep(idx, times = ld$n[idx])]
  # mu_r_erx <- sum(lo_1_erx - lo_0_erx) / sum(ld$n[idx])
  
  # Finally, these give the effect in those revealed to surgery domain. This
  # is what we want.
  idx <- d_s[er == 1 , which = T]
  lo_0_erx <- eta_r_0[ii, rep(idx, times = ld$n[idx])]
  lo_1_erx <- eta_r_1[ii, rep(idx, times = ld$n[idx])]
  mu_r_er <- sum(lo_1_erx - lo_0_erx) / sum(ld$n[idx])
  
  # avg effect then effect in non-reveal and reveal
  c("mu_r" = mu_r, 
    # "mu_r_erx" = mu_r_erx, 
    "mu_r_er" = mu_r_er)
}, mc.cores = 10))


# Compute the effect of duration for the patients that received one-stage
# and for those that received two-stage surgery.
b_d <- do.call(rbind, mclapply(1:(min(nrow(eta_d_0), max_rows)), function(ii){
  
  lo_0 <- eta_d_0[ii, rep(1:nrow(d_s), times = ld$n)]
  lo_1 <- eta_d_1[ii, rep(1:nrow(d_s), times = ld$n)]

  # stratification, silo/revision type that actually took place
  # idx <- d_s[ed == 0 & srp2 == 0, which = T]
  # lo_0_edx_srp1 <- eta_d_0[ii, rep(idx, times = ld$n[idx])]
  # lo_1_edx_srp1 <- eta_d_1[ii, rep(idx, times = ld$n[idx])]
  # mu_d_edx_srp1 <- sum(lo_1_edx_srp1 - lo_0_edx_srp1) / sum(ld$n[idx])
  # 
  # idx <- d_s[ed == 0 & srp2 == 1, which = T]
  # lo_0_edx_srp2 <- eta_d_0[ii, rep(idx, times = ld$n[idx])]
  # lo_1_edx_srp2 <- eta_d_1[ii, rep(idx, times = ld$n[idx])]
  # mu_d_edx_srp2 <- sum(lo_1_edx_srp2 - lo_0_edx_srp2) / sum(ld$n[idx])

  idx <- d_s[ed == 1 & srp2 == 0, which = T]
  lo_0_ed_srp1 <- eta_d_0[ii, rep(idx, times = ld$n[idx])]
  lo_1_ed_srp1 <- eta_d_1[ii, rep(idx, times = ld$n[idx])]
  mu_d_ed_srp1 <- sum(lo_1_ed_srp1 - lo_0_ed_srp1) / sum(ld$n[idx])

  idx <- d_s[ed == 1 & srp2 == 1, which = T]
  lo_0_ed_srp2 <- eta_d_0[ii, rep(idx, times = ld$n[idx])]
  lo_1_ed_srp2 <- eta_d_1[ii, rep(idx, times = ld$n[idx])]
  mu_d_ed_srp2 <- sum(lo_1_ed_srp2 - lo_0_ed_srp2) / sum(ld$n[idx])

  c("mu_d" = mean(lo_1 - lo_0), 
    # you need to base the mean on the n that were used in the strata
    # "mu_d_edx_srp1" = mu_d_edx_srp1,  "mu_d_edx_srp2" = mu_d_edx_srp2,
    "mu_d_ed_srp1" = mu_d_ed_srp1, "mu_d_ed_srp2" = mu_d_ed_srp2
    )
  
}, mc.cores = 10))

# Compute the effect of AB choice.
b_f <- do.call(rbind, mclapply(1:(min(nrow(eta_f_0), max_rows)), function(ii){
  
  # Avg effect of rif - what is the effect of rif in the sample population
  # Might be wrong, but don't believe this is a meaningful/useful quantity.
  lo_0 <- eta_f_0[ii, rep(1:nrow(d_s), times = ld$n)]
  lo_1 <- eta_f_1[ii, rep(1:nrow(d_s), times = ld$n)]
  mu_f <- mean(lo_1 - lo_0)
  
  # Should be no effect of rif in those that were not rand to choice
  # idx <- d_s[ef == 0 , which = T]
  # lo_0_efx <- eta_f_0[ii, rep(idx, times = ld$n[idx])]
  # lo_1_efx <- eta_f_1[ii, rep(idx, times = ld$n[idx])]
  # mu_f_efx <- sum(lo_1_efx - lo_0_efx) / sum(ld$n[idx])

  # Effect of rif in those rand to choice domain
  idx <- d_s[ef == 1 , which = T]
  lo_0_ef <- eta_f_0[ii, rep(idx, times = ld$n[idx])]
  lo_1_ef <- eta_f_1[ii, rep(idx, times = ld$n[idx])]
  mu_f_ef <- sum(lo_1_ef - lo_0_ef) / sum(ld$n[idx])
  
  c("mu_f" = mu_f, 
    # "mu_f_efx" = mu_f_efx, 
    "mu_f_ef" = mu_f_ef)
}, mc.cores = 10))

The following provides the results, along with comparisons to combinations of the parameters used in the data simulation.

For the surgery domain, the comparison of interest is dair (ref) vs revision, which can be obtained by averaging over the distribution of surgery type that took place. The revision effects are silo-specific in that only the late silo is randomised to surgery; we obtain a conditional treatment effect by stratifying on the reveal status (er).
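Concretely, the randomised dair vs revision contrast among those revealed to the surgery domain should approximately map to a weighted combination of the one- and two-stage parameters, with weights given by the proportion of revealed revision patients receiving each revision type (this is what the posterior calculation further below computes):

\[
\Delta_{r \mid er=1} \approx \Pr(srp_1 \mid er{=}1, r{=}1)\, b_{r1} + \Pr(srp_2 \mid er{=}1, r{=}1)\, b_{r2}
\]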

Warning

I don’t believe that we currently need to worry about differential selection (\(\mathbb{P}(S=s|G=1)\ne\mathbb{P}(S=s|G=2)\)) because only one silo contributes to the randomised comparison, and I think we are restricting to that silo by virtue of conditioning on er.

# dair (ref)  vs revision (of any form)
avg_comparisons(f2, variables = "r", comparison = "lnor")

 Term              Contrast Estimate Std. Error    z Pr(>|z|)   S  2.5 % 97.5 %
    r ln(odds(1) / odds(0))    -0.17    0.00118 -144   <0.001 Inf -0.172 -0.168

Columns: term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high 
Type:  response 
# The above call to avg_comparisons is equivalent to:
# w_eta_r0 <- sweep(eta_r0, 1, ld$n, "*")
d_new <- copy(d)
lo <- copy(d_new[, r := 0])
d_new <- copy(d)
hi <- copy(d_new[, r := 1])
y_lo <- predict(f2, newdata = lo)
y_hi <- predict(f2, newdata = hi)
mean(y_hi - y_lo)
[1] -0.1701395
# But we are only interested in the effect of r in the group that were revealed.
# The following, in avg_comparisons, does not make sense to me. Need to revisit.
# cmp <- avg_comparisons(f2, newdata = d[er == 1 & r == 1], variables = "r", comparison = "lnor")
# print(cmp, digits = 6)

# The randomised comparison should approx map to a weighted combination of the pars.
# The reason that the weights are computed conditional on r == 1 is so that we
# weight by the proportion within the revision group that receive each revision type 
# (approx 30% one-stage, 70% two-stage) and not the proportion receiving each revision 
# type over all those that received randomised surgery (approx 50% dair, 15% one, 35% two).
mean(d[er == 1 & r == 1, mean(srp1)] * post1$`b[4]` + d[er == 1 & r == 1, mean(srp2)] * post1$`b[5]`) 
[1] -0.6947469

The above series of outputs suggests that we are broadly able to recover the required parameters using the various approaches.

For the choice domain, the comparison of interest is no-rif (ref) vs rif, which can be obtained directly from the parameter estimate that characterises the effect of the randomised comparison. Choice domain effects reflect an average over the silos since all silos can enter this domain.

# no-rif (ref)  vs rif (of any form)
# NO - avg_comparisons(f2, variables = "f", comparison = "lnor")
# We are only interested in the effect of choice for those revealed to choice
# cmp <- avg_comparisons(f2, newdata = d[ef == 1], variables = "f", comparison = "lnor")
# print(cmp, digits = 6)

# colMeans(b_f)
# And this should just be the same as the coefficient from the model
mean(post1$`b[9]`)
[1] -0.6918082

For the duration domain, the comparison of interest can be obtained directly from the parameter estimates that characterise the effects of the randomised comparisons. Duration domain effects also currently reflect an average over the silos.

# long (ref) vs short (specific to the type of surg recvd - one or two stage)

cmp <- avg_comparisons(f2, newdata = d[ed == 1 ], variables = "d", comparison = "lnor", by = "d")
print(cmp, digits = 6)

 Term              Contrast d  Estimate Std. Error        z Pr(>|z|)   S
    d ln(odds(1) / odds(0)) 0 -0.664385 0.00422153 -157.380  < 0.001 Inf
    d ln(odds(1) / odds(0)) 1 -0.664248 0.00422073 -157.377  < 0.001 Inf
     2.5 %    97.5 %
 -0.672659 -0.656111
 -0.672521 -0.655976

Columns: term, contrast, d, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high 
Type:  response 
cmp <- avg_comparisons(f2, newdata = d[ed == 1 & srp2 == 1], variables = "d", comparison = "lnor")
print(cmp, digits = 6)

 Term              Contrast  Estimate Std. Error        z Pr(>|z|)   S    2.5 %
    d ln(odds(1) / odds(0)) -0.700403 0.00541208 -129.415  < 0.001 Inf -0.71101
    97.5 %
 -0.689795

Columns: term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high 
Type:  response 
# colMeans(b_d)

# The randomised comparison should approx map directly to the surg type 
# specific pars 
mean(post1$`b[6]`) 
[1] -0.6843975
mean(post1$`b[7]` ) 
[1] -0.7003817

Based on the above, we seem to be in the right ballpark.