```r
> # user_category == 0 (anti)
> m00 <- feols(accuracy ~ condition, d1_ai[user_category == 0], cluster = ~ResponseId)
> summ(m00)
                term                             result
              <char>                             <char>
1:       (Intercept) b = 89.93 [87.88, 91.97], p < .001
2: conditionpersuade b = -5.35 [-8.35, -2.36], p = .001
# treatment AI LESS accurate than dog cat AI

> # user_category == 0.5 (agnostic)
> m05 <- feols(accuracy ~ persuade_direction, d1_ai[user_category == 0.5], cluster = ~ResponseId)
> summ(m05)
                    term                             result
                  <char>                             <char>
1:           (Intercept) b = 82.31 [80.70, 83.92], p < .001
2: persuade_directionyes  b = -0.02 [-2.86, 2.83], p = .992
# persuade yes vs no: AI is similarly accurate

> # user_category == 1 (pro)
> m10 <- feols(accuracy ~ condition, d1_ai[user_category == 1], cluster = ~ResponseId)
> summ(m10)
                term                             result
              <char>                             <char>
1:       (Intercept) b = 90.94 [89.99, 91.89], p < .001
2: conditionpersuade b = -5.19 [-6.70, -3.68], p < .001
# treatment AI LESS accurate than dog cat AI
```

More interesting comparisons:

- only `condition == persuade` (i.e., actually treated; not the dogcat condition)
- set the agnostic `user_category` (0.5) as the reference category

```r
   persuade_direction user_category accuracy
               <char>         <num>    <num>
1:                 no           0.5 82.31053
2:                 no           1.0 85.75123
3:                yes           0.0 84.57604
4:                yes           0.5 82.29540

> # persuasion direction is no, agnostic vs pro-psychedelic
> m1 <- feols(accuracy ~ user_category, d1_ai_treated[persuade_direction == "no"], cluster = ~ResponseId)
> summ(m1)
             term                             result
           <char>                             <char>
1:    (Intercept) b = 82.31 [80.70, 83.92], p < .001
2: user_category1    b = 3.44 [1.45, 5.43], p = .001
# AI more accurate when persuading a pro-psychedelic user than an agnostic user

> # persuasion direction is yes, agnostic vs anti-psychedelic
> m2 <- feols(accuracy ~ user_category, d1_ai_treated[persuade_direction == "yes"], cluster = ~ResponseId)
> summ(m2)
             term                             result
           <char>                             <char>
1:    (Intercept) b = 82.30 [79.94, 84.65], p < .001
2: user_category0   b = 2.28 [-0.92, 5.48], p = .162
# AI also more accurate when persuading an anti-psychedelic user than an agnostic user, but not significant
```

![[20250129224511.png]]