In order to narrow down the difference between your model and Millsap's advice, I would suggest you compare your number of free parameters with those mentioned in Table 3 of the Wu paper.
# Extract the full parameter table from the fitted Wu-Estabrook (theta
# parameterization) configural model, so individual parameters and their
# free/fixed status can be inspected and counted.
PT.config.we.theta <- parTable(fa.config.we.theta)
Scaled Chi Square Difference Test (method = "satorra.2000")
Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq) fa.config.we.theta 106 2254.6 fa.config.myt.theta 120 6021.7 1618.8 8.8352 < 2.2e-16 *** lhs op rhs group free.MYT free.WE1 .p13. == .p86. 0 0 NA2 .p14. == .p87. 0 0 NA3 .p15. == .p88. 0 0 NA4 .p17. == .p90. 0 0 NA5 .p19. == .p92. 0 0 NA6 .p21. == .p94. 0 0 NA7 .p22. == .p95. 0 0 NA8 .p23. == .p96. 0 0 NA9 .p24. == .p97. 0 0 NA10 .p25. == .p98. 0 0 NA11 .p26. == .p99. 0 0 NA12 .p28. == .p101. 0 0 NA13 .p30. == .p103. 0 0 NA14 .p32. == .p105. 0 0 NA15 EXT =~ X1 1 1 216 EXT =~ X1 2 32 3517 EXT =~ X2 1 0 118 EXT =~ X2 2 0 3419 EXT =~ X3 1 2 320 EXT =~ X3 2 33 3621 EXT =~ X4 1 3 422 EXT =~ X4 2 34 3723 EXT =~ X5 1 4 524 EXT =~ X5 2 35 3825 EXT =~ X6 1 5 626 EXT =~ X6 2 36 3927 EXT =~ X7 1 6 728 EXT =~ X7 2 37 4035 INT =~ X10 1 8 1036 INT =~ X10 2 39 4337 INT =~ X11 1 9 1138 INT =~ X11 2 40 4439 INT =~ X12 1 10 1240 INT =~ X12 2 41 4541 INT =~ X8 1 7 942 INT =~ X8 2 38 4243 INT =~ X9 1 0 844 INT =~ X9 2 0 4149 X1 | t1 1 13 1350 X1 | t1 2 44 4651 X1 | t2 1 14 1452 X1 | t2 2 45 4759 X10 | t1 1 26 2860 X10 | t1 2 57 6161 X10 | t2 1 27 2962 X10 | t2 2 58 6269 X11 | t1 1 28 3070 X11 | t1 2 59 6371 X11 | t2 1 29 3172 X11 | t2 2 60 6479 X12 | t1 1 30 3280 X12 | t1 2 61 6587 X2 | t1 1 11 1588 X2 | t1 2 42 4889 X2 | t2 1 12 1690 X2 | t2 2 43 4997 X3 | t1 1 15 1798 X3 | t1 2 46 5099 X3 | t2 1 16 18100 X3 | t2 2 47 51107 X4 | t1 1 17 19108 X4 | t1 2 48 52109 X4 | t2 1 18 20110 X4 | t2 2 49 53117 X5 | t1 1 19 21118 X5 | t1 2 50 54125 X6 | t1 1 20 22126 X6 | t1 2 51 55133 X7 | t1 1 21 23134 X7 | t1 2 52 56141 X8 | t1 1 24 24142 X8 | t1 2 55 57143 X8 | t2 1 25 25144 X8 | t2 2 56 58151 X9 | t1 1 22 26152 X9 | t1 2 53 59153 X9 | t2 1 23 27154 X9 | t2 2 54 6053 X1 ~*~ X1 1 0 054 X1 ~*~ X1 2 0 063 X10 ~*~ X10 1 0 064 X10 ~*~ X10 2 0 073 X11 ~*~ X11 1 0 074 X11 ~*~ X11 2 0 081 X12 ~*~ X12 1 0 082 X12 ~*~ X12 2 0 091 X2 ~*~ X2 1 0 092 X2 ~*~ X2 2 0 0101 X3 ~*~ X3 1 0 0102 X3 ~*~ X3 2 0 0111 X4 ~*~ X4 1 0 0112 X4 ~*~ X4 2 0 0119 X5 ~*~ X5 1 0 
0120 X5 ~*~ X5 2 0 0127 X6 ~*~ X6 1 0 0128 X6 ~*~ X6 2 0 0135 X7 ~*~ X7 1 0 0136 X7 ~*~ X7 2 0 0145 X8 ~*~ X8 1 0 0146 X8 ~*~ X8 2 0 0155 X9 ~*~ X9 1 0 0156 X9 ~*~ X9 2 0 029 EXT ~~ EXT 1 0 030 EXT ~~ EXT 2 62 031 EXT ~~ INT 1 31 3332 EXT ~~ INT 2 64 6645 INT ~~ INT 1 0 046 INT ~~ INT 2 63 055 X1 ~~ X1 1 0 056 X1 ~~ X1 2 0 065 X10 ~~ X10 1 0 066 X10 ~~ X10 2 0 075 X11 ~~ X11 1 0 076 X11 ~~ X11 2 0 083 X12 ~~ X12 1 0 084 X12 ~~ X12 2 0 093 X2 ~~ X2 1 0 094 X2 ~~ X2 2 0 0103 X3 ~~ X3 1 0 0104 X3 ~~ X3 2 0 0113 X4 ~~ X4 1 0 0114 X4 ~~ X4 2 0 0121 X5 ~~ X5 1 0 0122 X5 ~~ X5 2 0 0129 X6 ~~ X6 1 0 0130 X6 ~~ X6 2 0 0137 X7 ~~ X7 1 0 0138 X7 ~~ X7 2 0 0147 X8 ~~ X8 1 0 0148 X8 ~~ X8 2 0 0157 X9 ~~ X9 1 0 0158 X9 ~~ X9 2 0 033 EXT ~1 1 0 034 EXT ~1 2 65 047 INT ~1 1 0 048 INT ~1 2 66 057 X1 ~1 1 0 058 X1 ~1 2 0 067 X10 ~1 1 0 068 X10 ~1 2 0 077 X11 ~1 1 0 078 X11 ~1 2 0 085 X12 ~1 1 0 086 X12 ~1 2 0 095 X2 ~1 1 0 096 X2 ~1 2 0 0105 X3 ~1 1 0 0106 X3 ~1 2 0 0115 X4 ~1 1 0 0116 X4 ~1 2 0 0123 X5 ~1 1 0 0124 X5 ~1 2 0 0131 X6 ~1 1 0 0132 X6 ~1 2 0 0139 X7 ~1 1 0 0140 X7 ~1 2 0 0149 X8 ~1 1 0 0150 X8 ~1 2 0 0159 X9 ~1 1 0 0160 X9 ~1 2 0 0configural.myt.theta <- "EXT =~ c(1, 1)*X2 + X1 + X3 + X4 + X5 + X6 + X7 # LOADINGS CONSTRAINED ON ANCHORSINT =~ c(1, 1)*X9 + X8 + X10 + X11 + X12X2 | c(t1_2, t1_2)*t1 + c(t2_2, t2_2)*t2 # FIRST THRESHOLD ON EACH ITEM, AND SECOND THRESHOLD ON ANCHORSX1 | c(t1_1, t1_1)*t1 + t2X3 | c(t1_3, t1_3)*t1 + t2X4 | c(t1_4, t1_4)*t1 + t2X5 | c(t1_5, t1_5)*t1X6 | c(t1_6, t1_6)*t1X7 | c(t1_7, t1_7)*t1X9 | c(t1_9, t1_9)*t1 + c(t2_9, t2_9)*t2X8 | c(t1_8, t1_8)*t1 + t2X10 | c(t1_10, t1_10)*t1 + t2X11 | c(t1_11, t1_11)*t1 + t2X12 | c(t1_12, t1_12)*t1X1 ~ c(0, 0)*1 # ALL INTERCEPTS TO ZEROX2 ~ c(0, 0)*1X3 ~ c(0, 0)*1X4 ~ c(0, 0)*1X5 ~ c(0, 0)*1X6 ~ c(0, 0)*1X7 ~ c(0, 0)*1X8 ~ c(0, 0)*1X9 ~ c(0, 0)*1X10 ~ c(0, 0)*1X11 ~ c(0, 0)*1X12 ~ c(0, 0)*1X1 ~~ c(1,1)*X1 # ALL UNIQUE VARIANCES TO 1X2 ~~ c(1,1)*X2X3 ~~ c(1,1)*X3X4 ~~ c(1,1)*X4X5 ~~ c(1,1)*X5X6 ~~ c(1,1)*X6X7 
~~ c(1,1)*X7X8 ~~ c(1,1)*X8X9 ~~ c(1,1)*X9X10 ~~ c(1,1)*X10X11 ~~ c(1,1)*X11X12 ~~ c(1,1)*X12EXT ~~ c(1,NA)*EXT # FIXED MEAN AND VARIANCE IN FIRST GROUP, FREE COVARIANCEINT ~~ c(1,NA)*INTEXT ~~ NA*INTEXT ~ c(0, NA)*1INT ~ c(0, NA)*1"configural.we.theta <- "EXT =~ c(NA, NA)*X2 + X1 + X3 + X4 + X5 + X6 + X7 # ALL FREE LOADINGSINT =~ c(NA, NA)*X9 + X8 + X10 + X11 + X12X1 | t1 + t2 # ALL FREE THRESHOLDSX2 | t1 + t2X3 | t1 + t2X4 | t1 + t2X5 | t1X6 | t1X7 | t1X8 | t1 + t2X9 | t1 + t2X10 | t1 + t2X11 | t1 + t2X12 | t1X1 ~ c(0, 0)*1 # ALL INTERCEPTS TO ZEROX2 ~ c(0, 0)*1X3 ~ c(0, 0)*1X4 ~ c(0, 0)*1X5 ~ c(0, 0)*1X6 ~ c(0, 0)*1X7 ~ c(0, 0)*1X8 ~ c(0, 0)*1X9 ~ c(0, 0)*1X10 ~ c(0, 0)*1X11 ~ c(0, 0)*1X12 ~ c(0, 0)*1X1 ~~ c(1,1)*X1 # ALL UNIQUE VARIANCES TO 1X2 ~~ c(1,1)*X2X3 ~~ c(1,1)*X3X4 ~~ c(1,1)*X4X5 ~~ c(1,1)*X5X6 ~~ c(1,1)*X6X7 ~~ c(1,1)*X7X8 ~~ c(1,1)*X8X9 ~~ c(1,1)*X9X10 ~~ c(1,1)*X10 X11 ~~ c(1,1)*X11X12 ~~ c(1,1)*X12EXT ~~ c(1,1)*EXT # FIXED MEANS AND VARIANCES IN BOTH GROUPS, FREE COVARIANCEINT ~~ c(1,1)*INTEXT ~~ NA*INTEXT ~ c(0, 0)*1INT ~ c(0, 0)*1"> lav_partable_df(partable(fa.config))
[1] 106
> fitmeasures(fa.config, "df")
df
120
> lav_partable_df(partable(fa.config.we.delta))
[1] 106
> fitmeasures(fa.config.we.delta, "df")
df
106
> lav_partable_df(partable(fa.config.we.theta))
[1] 106
> fitmeasures(fa.config.we.theta, "df")
df
106
N <- 1000
## Population model: a single factor with loadings .6-.9 on four indicators.
pop <- ' F1 =~ .6*X1 + .7*X2 + .8*X3 + .9*X4 '
## Simulate standardized latent-response data from the population model;
## empirical = TRUE reproduces the population moments exactly in the sample.
dat <- simulateData(pop, model.type = "cfa", sample.nobs = N,
                    standardized = TRUE, std.lv = TRUE, seed = 12345,
                    empirical = TRUE)
## Discretize every column at the given cutpoints, yielding ordered factors.
discretize <- function(d, cutpoints) {
  data.frame(lapply(d, cut, breaks = cutpoints, ordered_result = TRUE))
}
## Binary version: a single threshold at 0.
dat2 <- discretize(dat, c(-Inf, 0, Inf))
dat2$group <- 1:2  # recycled over rows, alternating group membership
## Ternary version: thresholds at -.5 and .5.
dat3 <- discretize(dat, c(-Inf, -.5, .5, Inf))
dat3$group <- 1:2
## Mixture of binary and ternary indicators: ternary X1-X2, binary X3-X4.
mix <- cbind(dat3[c("X1","X2")], dat2[c("X3","X4")])
#### fit models to 3-category data
# CONFIGURAL as in Wu Estabrook Condition 7
# Identification (delta parameterization, see cfa() call below): all loadings
# and all thresholds freely estimated in both groups; latent-response
# intercepts fixed to 0; scale factors (~*~) fixed to 1 in both groups;
# factor variance fixed to 1 and factor mean fixed to 0 in both groups.
mod.c7.3 <- "
F1 =~ c(NA, NA)*X1 + X2 + X3 + X4
X1 | t1 + t2
X2 | t1 + t2
X3 | t1 + t2
X4 | t1 + t2
X1 ~ c(0, 0)*1
X2 ~ c(0, 0)*1
X3 ~ c(0, 0)*1
X4 ~ c(0, 0)*1
X1 ~*~ c(1, 1)*X1
X2 ~*~ c(1, 1)*X2
X3 ~*~ c(1, 1)*X3
X4 ~*~ c(1, 1)*X4
F1 ~~ c(1, 1)*F1
F1 ~ c(0, 0)*1
"
# CONFIGURAL as in Wu Estabrook Condition 8
# Identification (theta parameterization, see cfa() call below): all loadings
# and all thresholds freely estimated in both groups; latent-response
# intercepts fixed to 0; residual variances (~~) fixed to 1 in both groups;
# factor variance fixed to 1 and factor mean fixed to 0 in both groups.
mod.c8.3 <- "
F1 =~ c(NA, NA)*X1 + X2 + X3 + X4
X1 | t1 + t2
X2 | t1 + t2
X3 | t1 + t2
X4 | t1 + t2
X1 ~ c(0, 0)*1
X2 ~ c(0, 0)*1
X3 ~ c(0, 0)*1
X4 ~ c(0, 0)*1
X1 ~~ c(1, 1)*X1
X2 ~~ c(1, 1)*X2
X3 ~~ c(1, 1)*X3
X4 ~~ c(1, 1)*X4
F1 ~~ c(1, 1)*F1
F1 ~ c(0, 0)*1
"
# Millsap & Yun-Tein's (MYT) parameterization
# Identification: marker loading (X1) fixed to 1 in both groups; both
# thresholds of the marker item and the first threshold of every other item
# equated across groups (shared labels, e.g. t1_2); intercepts fixed to 0;
# residual variances fixed to 1 in group 1 but free in group 2; factor
# variance free in both groups; factor mean fixed to 0 in group 1, free in
# group 2.
mod.myt3 <- "
F1 =~ c(1, 1)*X1 + X2 + X3 + X4
X1 | c(t1_1, t1_1)*t1 + c(t2_1, t2_1)*t2
X2 | c(t1_2, t1_2)*t1 + t2
X3 | c(t1_3, t1_3)*t1 + t2
X4 | c(t1_4, t1_4)*t1 + t2
X1 ~ c(0, 0)*1
X2 ~ c(0, 0)*1
X3 ~ c(0, 0)*1
X4 ~ c(0, 0)*1
X1 ~~ c(1, NA)*X1
X2 ~~ c(1, NA)*X2
X3 ~~ c(1, NA)*X3
X4 ~~ c(1, NA)*X4
F1 ~~ c(NA, NA)*F1
F1 ~ c( 0, NA)*1
"
# Fit each identification scheme to the 3-category data. The parameterization
# argument must match what each model syntax fixes (scale factors for delta,
# residual variances for theta).
fit.c7.3 <- cfa(mod.c7.3, data = dat3, group = "group", parameterization="delta")
fit.c8.3 <- cfa(mod.c8.3, data = dat3, group = "group", parameterization="theta")
fit.myt3 <- cfa(mod.myt3, data = dat3, group = "group", parameterization="theta")
# Parameter tables, for comparing free-parameter counts across schemes.
WED <- partable(fit.c7.3)
WET <- partable(fit.c8.3)
MYT <- partable(fit.myt3) # does not subtract constraints
# Free parameters per scheme. NOTE(review): for MYT, equality-constrained
# parameters are apparently counted as free here (see "wrong" df below).
lav_partable_npar(WED)
lav_partable_npar(WET)
lav_partable_npar(MYT)
# Number of sample statistics (moments) per scheme.
lav_partable_ndat(WED)
lav_partable_ndat(WET)
lav_partable_ndat(MYT)
# Degrees of freedom computed from the parameter table alone:
lav_partable_df(WED)
lav_partable_df(WET)
lav_partable_df(MYT) # wrong: equality constraints are not subtracted
fitmeasures(fit.myt3, "df") # correct df, from the fitted object
# Print full tables; uncomment the bracket filter to view only
# (co)variance and scaling-factor rows.
WED #[WED$op %in% c("~~","~*~"), ]
WET #[WET$op %in% c("~~","~*~"), ]
# Print the MYT parameter table (uncomment the filter to narrow the view).
# Fix: the following library() call was fused onto this line in the paste;
# it starts the next example and must be a separate statement.
MYT #[MYT$op %in% c("~~","~*~"), ]
library(lavaan)
# Read the example-5 data: 8 ordinal indicators plus a grouping variable,
# stored without a header row. (Fix: the four statements were fused onto a
# single line in the paste, which is invalid R; restored as separate lines.)
dat5 <- read.csv("example5c.csv", header = FALSE)
colnames(dat5) <- c("u1", "u2", "u3", "u4", "u5", "u6", "u7", "u8", "g")
# Declare the 8 indicators as ordered factors so lavaan treats them as
# categorical endogenous variables.
for (i in 1:8) dat5[, i] <- ordered(dat5[, i])
# Grouping variable; assumes the raw codes sort as male, female —
# TODO confirm coding against the source file.
dat5[, 9] <- factor(dat5[, 9], labels = c("male", "female"))
# MYT-style configural model for example 5 (theta parameterization):
# marker loadings (u1, u5) fixed to 1 in both groups; both thresholds of the
# marker items and the first threshold of every other item equated across
# groups; residual variances fixed to 1 in group 1, free in group 2; factor
# means 0 in group 1, free in group 2; factor (co)variances free.
# (Fix: newlines inside the model string were stripped in the paste, fusing
# adjacent syntax lines, e.g. "u4f2"; line breaks restored.)
configural5 <- "
f1 =~ c(1, 1)*u1 + u2 + u3 + u4
f2 =~ c(1, 1)*u5 + u6 + u7 + u8
u1 | c(t11, t11)*t1 + c(t12, t12)*t2 + t3 + t4
u2 | c(t21, t21)*t1 + t2 + t3 + t4
u3 | c(t31, t31)*t1 + t2 + t3 + t4
u4 | c(t41, t41)*t1 + t2 + t3 + t4
u5 | c(t51, t51)*t1 + c(t52, t52)*t2 + t3 + t4
u6 | c(t61, t61)*t1 + t2 + t3 + t4
u7 | c(t71, t71)*t1 + t2 + t3 + t4
u8 | c(t81, t81)*t1 + t2 + t3 + t4
f1 ~~ NA*f1
f2 ~~ NA*f2
f1 ~~ NA*f2
f1 ~ c(0, NA)*1
f2 ~ c(0, NA)*1
u1 ~~ c(1, NA)*u1
u2 ~~ c(1, NA)*u2
u3 ~~ c(1, NA)*u3
u4 ~~ c(1, NA)*u4
u5 ~~ c(1, NA)*u5
u6 ~~ c(1, NA)*u6
u7 ~~ c(1, NA)*u7
u8 ~~ c(1, NA)*u8
"
# Wu-Estabrook configural model, theta parameterization: all loadings and
# thresholds free; residual variances fixed to 1 in both groups; factor
# means 0 and variances 1 in both groups; factor covariance free.
# (Fix: newlines inside the model strings and between statements were
# stripped in the paste; line breaks restored throughout this unit.)
configural5.we.theta <- "
f1 =~ c(NA, NA)*u1 + u2 + u3 + u4
f2 =~ c(NA, NA)*u5 + u6 + u7 + u8
u1 | t1 + t2 + t3 + t4
u2 | t1 + t2 + t3 + t4
u3 | t1 + t2 + t3 + t4
u4 | t1 + t2 + t3 + t4
u5 | t1 + t2 + t3 + t4
u6 | t1 + t2 + t3 + t4
u7 | t1 + t2 + t3 + t4
u8 | t1 + t2 + t3 + t4
f1 ~~ c(1,1)*f1
f2 ~~ c(1,1)*f2
f1 ~~ NA*f2
f1 ~ c(0, 0)*1
f2 ~ c(0, 0)*1
u1 ~~ c(1, 1)*u1
u2 ~~ c(1, 1)*u2
u3 ~~ c(1, 1)*u3
u4 ~~ c(1, 1)*u4
u5 ~~ c(1, 1)*u5
u6 ~~ c(1, 1)*u6
u7 ~~ c(1, 1)*u7
u8 ~~ c(1, 1)*u8
"
# Alternative theta identification: anchors u2/u6 carry free loadings while
# u1/u5 loadings and first thresholds are equated across groups; group-1
# factor variances fixed to 1 (free in group 2), group-1 factor means fixed
# to 0 (free in group 2); residual variances fixed to 1 in both groups.
configural5.we.theta.alt <- "
f1 =~ c(NA,NA)*u2 + c(l1, l1)*u1 + u3 + u4
f2 =~ c(NA,NA)*u6 + c(l5, l5)*u5 + u7 + u8
u1 | c(t11, t11)*t1 + t2 + t3 + t4
u2 | t1 + t2 + t3 + t4
u3 | t1 + t2 + t3 + t4
u4 | t1 + t2 + t3 + t4
u5 | c(t15, t15)*t1 + t2 + t3 + t4
u6 | t1 + t2 + t3 + t4
u7 | t1 + t2 + t3 + t4
u8 | t1 + t2 + t3 + t4
f1 ~~ c(1,NA)*f1
f2 ~~ c(1,NA)*f2
f1 ~~ NA*f2
f1 ~ c(0, NA)*1
f2 ~ c(0, NA)*1
u1 ~~ c(1, 1)*u1
u2 ~~ c(1, 1)*u2
u3 ~~ c(1, 1)*u3
u4 ~~ c(1, 1)*u4
u5 ~~ c(1, 1)*u5
u6 ~~ c(1, 1)*u6
u7 ~~ c(1, 1)*u7
u8 ~~ c(1, 1)*u8
"
# Wu-Estabrook configural model, delta parameterization: as the theta
# version above, but fixing scale factors (~*~) to 1 instead of residual
# variances.
configural5.we.delta <- "
f1 =~ c(NA, NA)*u1 + u2 + u3 + u4
f2 =~ c(NA, NA)*u5 + u6 + u7 + u8
u1 | t1 + t2 + t3 + t4
u2 | t1 + t2 + t3 + t4
u3 | t1 + t2 + t3 + t4
u4 | t1 + t2 + t3 + t4
u5 | t1 + t2 + t3 + t4
u6 | t1 + t2 + t3 + t4
u7 | t1 + t2 + t3 + t4
u8 | t1 + t2 + t3 + t4
f1 ~~ c(1,1)*f1
f2 ~~ c(1,1)*f2
f1 ~~ NA*f2
f1 ~ c(0, 0)*1
f2 ~ c(0, 0)*1
u1 ~*~ c(1, 1)*u1
u2 ~*~ c(1, 1)*u2
u3 ~*~ c(1, 1)*u3
u4 ~*~ c(1, 1)*u4
u5 ~*~ c(1, 1)*u5
u6 ~*~ c(1, 1)*u6
u7 ~*~ c(1, 1)*u7
u8 ~*~ c(1, 1)*u8
"
# Fit all four specifications; the parameterization argument matches what
# each model syntax fixes.
outConfigural5 <- cfa(configural5, data = dat5, group = "g",
                      parameterization = "theta", estimator = "wlsmv")
outConfigural5.we.theta <- cfa(configural5.we.theta, data = dat5, group = "g",
                               parameterization = "theta", estimator = "wlsmv")
outConfigural5.we.theta.alt <- cfa(configural5.we.theta.alt, data = dat5,
                                   group = "g", parameterization = "theta",
                                   estimator = "wlsmv")
outConfigural5.we.delta <- cfa(configural5.we.delta, data = dat5, group = "g",
                               parameterization = "delta", estimator = "wlsmv")
# For each configural specification, compare the df implied by the parameter
# table against the df reported by fitMeasures(), alongside several fit
# indices (gammaHat comes from semTools::moreFitIndices).
# (Fix: the four statements were fused onto a single line in the paste;
# restored as separate statements.)
c(df.partab = lav_partable_df(partable(outConfigural5)),
  fitMeasures(outConfigural5, c("df", "rmsea", "mfi", "cfi")),
  moreFitIndices(outConfigural5)["gammaHat"])
c(df.partab = lav_partable_df(partable(outConfigural5.we.theta)),
  fitMeasures(outConfigural5.we.theta, c("df", "rmsea", "mfi", "cfi")),
  moreFitIndices(outConfigural5.we.theta)["gammaHat"])
c(df.partab = lav_partable_df(partable(outConfigural5.we.theta.alt)),
  fitMeasures(outConfigural5.we.theta.alt, c("df", "rmsea", "mfi", "cfi")),
  moreFitIndices(outConfigural5.we.theta.alt)["gammaHat"])
c(df.partab = lav_partable_df(partable(outConfigural5.we.delta)),
  fitMeasures(outConfigural5.we.delta, c("df", "rmsea", "mfi", "cfi")),
  moreFitIndices(outConfigural5.we.delta)["gammaHat"])
Also, my original issue of non-convergence using the MYT parameterisation on my original data remains.
Although she goes by Jenn informally, Jenn's full first name is Jenn-Yun and her last name is Tein.