@@ -91,7 +91,7 @@ ggplot(data = awards_melted, aes(x = value)) +

## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

- ![](Poisson_Regression_files/figure-markdown_github/unnamed-chunk-4-1.png)
+ ![](figures/poisson-unnamed-chunk-5-1.png)

``` r
awards$math <- scale(awards$math)
@@ -135,8 +135,8 @@ model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson,
##
## SAMPLING FOR MODEL 'count' NOW (CHAIN 1).
##
- ## Gradient evaluation took 0.00012 seconds
- ## 1000 transitions using 10 leapfrog steps per transition would take 1.2 seconds.
+ ## Gradient evaluation took 0.000117 seconds
+ ## 1000 transitions using 10 leapfrog steps per transition would take 1.17 seconds.
## Adjust your expectations accordingly!
##
##
@@ -153,9 +153,9 @@ model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson,
## Iteration: 1800 / 2000 [ 90%] (Sampling)
## Iteration: 2000 / 2000 [100%] (Sampling)
##
- ## Elapsed Time: 0.236298 seconds (Warm-up)
- ##               0.256971 seconds (Sampling)
- ##               0.493269 seconds (Total)
+ ## Elapsed Time: 0.289811 seconds (Warm-up)
+ ##               0.270276 seconds (Sampling)
+ ##               0.560087 seconds (Total)
##
##
## SAMPLING FOR MODEL 'count' NOW (CHAIN 2).
@@ -178,15 +178,15 @@ model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson,
## Iteration: 1800 / 2000 [ 90%] (Sampling)
## Iteration: 2000 / 2000 [100%] (Sampling)
##
- ## Elapsed Time: 0.272622 seconds (Warm-up)
- ##               0.282284 seconds (Sampling)
- ##               0.554906 seconds (Total)
+ ## Elapsed Time: 0.281356 seconds (Warm-up)
+ ##               0.258399 seconds (Sampling)
+ ##               0.539755 seconds (Total)
##
##
## SAMPLING FOR MODEL 'count' NOW (CHAIN 3).
##
- ## Gradient evaluation took 3.1e-05 seconds
- ## 1000 transitions using 10 leapfrog steps per transition would take 0.31 seconds.
+ ## Gradient evaluation took 3e-05 seconds
+ ## 1000 transitions using 10 leapfrog steps per transition would take 0.3 seconds.
## Adjust your expectations accordingly!
##
##
@@ -203,15 +203,15 @@ model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson,
## Iteration: 1800 / 2000 [ 90%] (Sampling)
## Iteration: 2000 / 2000 [100%] (Sampling)
##
- ## Elapsed Time: 0.280228 seconds (Warm-up)
- ##               0.257846 seconds (Sampling)
- ##               0.538074 seconds (Total)
+ ## Elapsed Time: 0.273531 seconds (Warm-up)
+ ##               0.267135 seconds (Sampling)
+ ##               0.540666 seconds (Total)
##
##
## SAMPLING FOR MODEL 'count' NOW (CHAIN 4).
##
- ## Gradient evaluation took 3e-05 seconds
- ## 1000 transitions using 10 leapfrog steps per transition would take 0.3 seconds.
+ ## Gradient evaluation took 3.1e-05 seconds
+ ## 1000 transitions using 10 leapfrog steps per transition would take 0.31 seconds.
## Adjust your expectations accordingly!
##
##
@@ -228,9 +228,9 @@ model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson,
## Iteration: 1800 / 2000 [ 90%] (Sampling)
## Iteration: 2000 / 2000 [100%] (Sampling)
##
- ## Elapsed Time: 0.256981 seconds (Warm-up)
- ##               0.29209 seconds (Sampling)
- ##               0.549071 seconds (Total)
+ ## Elapsed Time: 0.248926 seconds (Warm-up)
+ ##               0.250404 seconds (Sampling)
+ ##               0.49933 seconds (Total)

``` r
summary(model2)
@@ -253,18 +253,18 @@ summary(model2)
## (Intercept)   -0.5 0.2 -0.9 -0.6 -0.5 -0.4 -0.1
## math           0.3 0.1  0.2  0.3  0.3  0.4  0.5
## prog2          0.5 0.2  0.0  0.3  0.5  0.6  0.9
- ## prog3          0.6 0.3  0.1  0.4  0.6  0.7  1.1
+ ## prog3          0.6 0.3  0.1  0.4  0.6  0.7  1.0
## mean_PPD       1.0 0.1  0.8  0.9  1.0  1.0  1.2
- ## log-posterior -252.2 1.4 -255.9 -252.8 -251.8 -251.1 -250.4
+ ## log-posterior -252.2 1.4 -255.8 -252.9 -251.9 -251.1 -250.4
##
## Diagnostics:
##               mcse Rhat n_eff
- ## (Intercept)   0.0  1.0  1650
- ## math          0.0  1.0  2690
- ## prog2         0.0  1.0  1911
- ## prog3         0.0  1.0  1820
- ## mean_PPD      0.0  1.0  3438
- ## log-posterior 0.0  1.0  1664
+ ## (Intercept)   0.0  1.0  1997
+ ## math          0.0  1.0  2485
+ ## prog2         0.0  1.0  2291
+ ## prog3         0.0  1.0  2054
+ ## mean_PPD      0.0  1.0  3751
+ ## log-posterior 0.0  1.0  1624
##
## For each parameter, mcse is Monte Carlo standard error, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence Rhat=1).

@@ -273,19 +273,19 @@ posterior_interval(model2, prob = 0.95)
```

##                   2.5%      97.5%
- ## (Intercept) -0.92868856 -0.1355184
- ## math         0.18303731  0.4864172
- ## prog2        0.02950207  0.9384001
- ## prog3        0.08602003  1.0644818
+ ## (Intercept) -0.89457959 -0.1447066
+ ## math         0.18111692  0.4915252
+ ## prog2        0.03168288  0.9214785
+ ## prog3        0.07135645  1.0449510

``` r
plot(model2, plotfun = "areas", prob = 0.95)
```

- ![](Poisson_Regression_files/figure-markdown_github/unnamed-chunk-8-1.png)
+ ![](figures/poisson-unnamed-chunk-9-1.png)

``` r
pp_check(model2)
```

- ![](Poisson_Regression_files/figure-markdown_github/unnamed-chunk-9-1.png)
+ ![](figures/poisson-unnamed-chunk-10-1.png)
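
For reference, the workflow whose output changes in these hunks can be reproduced with the minimal sketch below. It assumes an `awards` data frame with `num_awards`, `math`, and `prog` columns is already loaded; the `stan_glm()` call shown in the hunk headers is truncated, so any additional arguments it carries (e.g. priors) are not reproduced here.

``` r
# Minimal sketch of the rstanarm Poisson workflow documented in this file.
# Assumes `awards` (num_awards, math, prog) is already in the workspace.
library(rstanarm)

awards$math <- scale(awards$math)   # standardize the continuous predictor

# rstanarm defaults: 4 chains of 2000 iterations, matching the sampler log above
model2 <- stan_glm(num_awards ~ math + prog, data = awards, family = poisson)

summary(model2)                                 # estimates plus mcse / Rhat / n_eff
posterior_interval(model2, prob = 0.95)         # 95% posterior intervals
plot(model2, plotfun = "areas", prob = 0.95)    # posterior density areas
pp_check(model2)                                # posterior predictive check
```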