I always learned to model the outcome coded as 1 (i.e., the "success" level). See below for the impact of switching the encoding:
# Fit a logistic regression of ans on x; glm models P(ans = 1 | x).
model <- glm(ans ~ x, family = binomial, data = simulated_data)
summary(model)
#
# Call:
# glm(formula = ans ~ x, family = binomial, data = simulated_data)
#
# Deviance Residuals:
# 1 2 3 4 5
# 1.6388 -0.6249 -1.2146 -0.8083 0.7389
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) -2.4753 2.5006 -0.99 0.322
# x 0.5957 0.6543 0.91 0.363
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 6.7301 on 4 degrees of freedom
# Residual deviance: 5.7505 on 3 degrees of freedom
# AIC: 9.7505
#
# Number of Fisher Scoring iterations: 4
#
# Flip the outcome encoding in place: every 1/TRUE becomes FALSE and vice versa,
# so the model below predicts the *opposite* event.
simulated_data$ans <- !simulated_data$ans
# Refit the same logistic regression on the flipped outcome. Note in the output
# below that only the signs of the coefficients (and residuals) flip; the
# standard errors, p-values, deviances, and AIC are identical to the first fit.
model_opp <- glm(ans ~ x, data = simulated_data, family = binomial)
summary(model_opp)
#
# Call:
# glm(formula = ans ~ x, family = binomial, data = simulated_data)
#
# Deviance Residuals:
# 1 2 3 4 5
# -1.6388 0.6249 1.2146 0.8083 -0.7389
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 2.4753 2.5006 0.99 0.322
# x -0.5957 0.6543 -0.91 0.363
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 6.7301 on 4 degrees of freedom
# Residual deviance: 5.7505 on 3 degrees of freedom
# AIC: 9.7505
#
# Number of Fisher Scoring iterations: 4
Hope this helps.