@article{zwaanReplicationsShouldBe2014,
title = {Replications {{Should Be Performed With Power}} and {{Precision}}: {{A Response}} to {{Rommers}}, {{Meyer}}, and {{Huettig}} (2013)},
volume = {25},
  issn = {0956-7976, 1467-9280},
doi = {10.1177/0956797613509634},
journal = {Psychological Science},
author = {Zwaan, Rolf A.},
year = {2014},
  pages = {305-307},
file = {D:\\core\\reading\\psychological science\\Zwaan - 2014 - Replications Should Be Performed With Power and Pr.pdf}
}
@article{simonsRegisteredReplicationReport2014,
title = {Registered {{Replication Report}}: {{Schooler}} and {{Engstler}}-{{Schooler}} (1990)},
volume = {9},
doi = {10.1177/1745691614545653},
abstract = {Trying to remember something now typically improves your ability to remember it later. However, after watching a video of a simulated bank robbery, participants who verbally described the robber were 25\% worse at identifying the robber in a lineup than were participants who instead listed U.S. states and capitals\textemdash{}this has been termed the ``verbal overshadowing'' effect (Schooler \& Engstler-Schooler, 1990). More recent studies suggested that this effect might be substantially smaller than first reported. Given uncertainty about the effect size, the influence of this finding in the memory literature, and its practical importance for police procedures, we conducted two collections of preregistered direct replications (RRR1 and RRR2) that differed only in the order of the description task and a filler task. In RRR1, when the description task immediately followed the robbery, participants who provided a description were 4\% less likely to select the robber than were those in the control condition. In RRR2, when the description was delayed by 20 min, they were 16\% less likely to select the robber. These findings reveal a robust verbal overshadowing effect that is strongly influenced by the relative timing of the tasks. The discussion considers further implications of these replications for our understanding of verbal overshadowing.},
journal = {Perspectives on Psychological Science},
author = {Simons, Daniel J.},
month = sep,
year = {2014},
keywords = {recognition memory,verbal overshadowing,eyewitness,lineup identification,replication},
pages = {556-578},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\FGP8JZ4N\\Perspectives on Psychological Science-2014-Simons-556-78.pdf}
}
@article{kleinInvestigatingVariationReplicability2014,
title = {Investigating {{Variation}} in {{Replicability}}},
volume = {45},
doi = {10.1027/1864-9335/a000178},
abstract = {Although replication is a central tenet of science, direct replications are rare in psychology. This research tested variation in the replicability of 13 classic and contemporary effects across 36 independent samples totaling 6,344 participants. In the aggregate, 10 effects replicated consistently. One effect \textendash{} imagined contact reducing prejudice \textendash{} showed weak support for replicability. And two effects \textendash{} flag priming influencing conservatism and currency priming influencing system justification \textendash{} did not replicate. We compared whether the conditions such as lab versus online or US versus international sample predicted effect magnitudes. By and large they did not. The results of this small sample of effects suggest that replicability is more dependent on the effect itself than on the sample and setting used to investigate the effect.},
journal = {Social Psychology},
author = {Klein, Richard A. and Ratliff, Kate A. and Vianello, Michelangelo and Adams Jr, Reginald B. and Bahn\'ik, {\v S}t{\v e}p\'an and Bernstein, Michael J. and Bocian, Konrad and Brandt, Mark J. and Brooks, Beach and Brumbaugh, Claudia Chloe and Cemalcilar, Zeynep and Chandler, Jesse and Cheong, Winnee and Davis, William E. and Devos, Thierry and Eisner, Matthew and Frankowska, Natalia and Furrow, David and Galliani, Elisa Maria and Hasselman, Fred and Hicks, Joshua A. and Hovermale, James F. and Hunt, S. Jane and Huntsinger, Jeffrey R. and Ijzerman, Hans and John, Melissa-Sue and {Joy-Gaba}, Jennifer A. and Barry Kappes, Heather and Krueger, Lacy E. and Kurtz, Jaime and Levitan, Carmel A. and Mallett, Robyn K. and Morris, Wendy L. and Nelson, Anthony J. and Nier, Jason A. and Packard, Grant and Pilati, Ronaldo and Rutchick, Abraham M. and Schmidt, Kathleen and Skorinko, Jeanine L. and Smith, Robert and Steiner, Troy G. and Storbeck, Justin and Van Swol, Lyn M. and Thompson, Donna and {van `t Veer}, A. E. and Ann Vaughn, Leigh and Vranka, Marek and Wichman, Aaron L. and Woodzicka, Julie A. and Nosek, Brian A.},
year = {2014},
pages = {142-152},
file = {D:\\core\\reading\\unsort\\Klein 等。 - 2014 - Investigating Variation in Replicability.pdf}
}
@article{RommersObjectshapeorientation2013,
title = {Object Shape and Orientation Do Not Routinely Influence Performance during Language Processing},
volume = {24},
issn = {1467-9280 (Electronic) 0956-7976 (Linking)},
doi = {10.1177/0956797613490746},
abstract = {The role of visual representations during language processing remains unclear: They could be activated as a necessary part of the comprehension process, or they could be less crucial and influence performance in a task-dependent manner. In the present experiments, participants read sentences about an object. The sentences implied that the object had a specific shape or orientation. They then either named a picture of that object (Experiments 1 and 3) or decided whether the object had been mentioned in the sentence (Experiment 2). Orientation information did not reliably influence performance in any of the experiments. Shape representations influenced performance most strongly when participants were asked to compare a sentence with a picture or when they were explicitly asked to use mental imagery while reading the sentences. Thus, in contrast to previous claims, implied visual information often does not contribute substantially to the comprehension process during normal reading.},
journal = {Psychological Science},
author = {Rommers, J. and Meyer, A. S. and Huettig, F.},
month = nov,
year = {2013},
pages = {2218-2225},
file = {D:\\core\\reading\\psychological science\\Psychological Science-2013-Rommers-2218-25.pdf;D:\\core\\reading\\storage\\ZNRDC2PE\\DS_10.1177_0956797613490746.pdf}
}
@article{barrRandomEffectsStructure2013,
title = {Random Effects Structure for Confirmatory Hypothesis Testing: {{Keep}} It Maximal},
volume = {68},
issn = {0749-596X},
doi = {10.1016/j.jml.2012.11.001},
abstract = {Linear mixed-effects models (LMEMs) have become increasingly prominent in psycholinguistics and related areas. However, many researchers do not seem to appreciate how random effects structures affect the generalizability of an analysis. Here, we argue that researchers using LMEMs for confirmatory hypothesis testing should minimally adhere to the standards that have been in place for many decades. Through theoretical arguments and Monte Carlo simulation, we show that LMEMs generalize best when they include the maximal random effects structure justified by the design. The generalization performance of LMEMs including data-driven random effects structures strongly depends upon modeling criteria and sample size, yielding reasonable results on moderately-sized samples when conservative criteria are used, but with little or no power advantage over maximal models. Finally, random-intercepts-only LMEMs used on within-subjects and/or within-items data from populations where subjects and/or items vary in their sensitivity to experimental manipulations always generalize worse than separate F1 and F2 tests, and in many cases, even worse than F1 alone. Maximal LMEMs should be the `gold standard' for confirmatory hypothesis testing in psycholinguistics and beyond.},
journal = {Journal of Memory and Language},
author = {Barr, Dale J. and Levy, Roger and Scheepers, Christoph and Tily, Harry J.},
year = {2013},
keywords = {Statistics,Linear mixed-effects models,Generalization,Monte Carlo simulation},
pages = {255-278},
file = {D:\\core\\reading\\jml\\1-s2.0-S0749596X12001180-mmc1.pdf;D:\\core\\reading\\jml\\1-s2.0-S0749596X17300013-main.pdf}
}
@article{ZwaanRevisitingMentalSimulation2012,
title = {Revisiting {{Mental Simulation}} in {{Language Comprehension}}: {{Six Replication Attempts}}},
volume = {7},
doi = {10.1371/journal.pone.0051382},
  abstract = {The notion of language comprehension as mental simulation has become popular in cognitive science. We revisit some of the original empirical evidence for this. Specifically, we attempted to replicate the findings from earlier studies that examined the mental simulation of object orientation, shape, and color, respectively, in sentence-picture verification. For each of these sets of findings, we conducted two web-based replication attempts using Amazon's Mechanical Turk. Our results are mixed. Participants responded faster to pictures that matched the orientation or shape implied by the sentence, replicating the original findings. The effect was larger and stronger for shape than orientation. Participants also responded faster to pictures that matched the color implied by the sentence, whereas the original studies obtained \emph{mis}match advantages. We argue that these results support mental simulation theory, show the importance of replication studies, and show the viability of web-based data collection.},
journal = {PLoS ONE},
author = {Zwaan, Rolf A. and Pecher, Diane},
year = {2012},
pages = {e51382},
file = {D:\\core\\reading\\PLOS\\journal.pone.0051382.pdf;D:\\core\\reading\\PLOS\\journal.pone.0051382.t001.png}
}
@article{FaulStatisticalpoweranalyses2009,
title = {Statistical Power Analyses Using {{G}}*{{Power}} 3.1: Tests for Correlation and Regression Analyses},
volume = {41},
issn = {1554-3528 (Electronic) 1554-351X (Linking)},
doi = {10.3758/BRM.41.4.1149},
abstract = {G*Power is a free power analysis program for a variety of statistical tests. We present extensions and improvements of the version introduced by Faul, Erdfelder, Lang, and Buchner (2007) in the domain of correlation and regression analyses. In the new version, we have added procedures to analyze the power of tests based on (1) single-sample tetrachoric correlations, (2) comparisons of dependent correlations, (3) bivariate linear regression, (4) multiple linear regression based on the random predictor model, (5) logistic regression, and (6) Poisson regression. We describe these new features and provide a brief introduction to their scope and handling.},
journal = {Behavior Research Methods},
author = {Faul, F. and Erdfelder, E. and Buchner, A. and Lang, A. G.},
month = nov,
year = {2009},
keywords = {Psychology,REGRESSION analysis,Algorithms,*Data Interpretation; Statistical,Linear Models,*Software},
pages = {1149-1160},
file = {D:\\core\\reading\\Behavior Research Methods, Instruments, & Computers\\GPower31-BRM-Paper.pdf}
}
@article{barsalouSimulationSituatedConceptualization2009,
title = {Simulation, Situated Conceptualization, and Prediction},
volume = {364},
issn = {1471-2970 (Electronic) 0962-8436 (Linking)},
doi = {10.1098/rstb.2008.0319},
abstract = {Based on accumulating evidence, simulation appears to be a basic computational mechanism in the brain that supports a broad spectrum of processes from perception to social cognition. Further evidence suggests that simulation is typically situated, with the situated character of experience in the environment being reflected in the situated character of the representations that underlie simulation. A basic architecture is sketched of how the brain implements situated simulation. Within this framework, simulators implement the concepts that underlie knowledge, and situated conceptualizations capture patterns of multi-modal simulation associated with frequently experienced situations. A pattern completion inference mechanism uses current perception to activate situated conceptualizations that produce predictions via simulations on relevant modalities. Empirical findings from perception, action, working memory, conceptual processing, language and social cognition illustrate how this framework produces the extensive prediction that characterizes natural intelligence.},
  journal = {Philosophical Transactions of the Royal Society B: Biological Sciences},
author = {Barsalou, Lawrence W.},
year = {2009},
keywords = {language,Humans,Brain/*physiology,*Models; Neurological,Cognition/*physiology,Memory/physiology,Concept Formation/*physiology,Intelligence/*physiology,Perception/physiology},
pages = {1281-1289},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\QFWJZGBG\\Barsalou_PTRSL-BS_2009_prediction.pdf}
}
@book{fodorLOTLanguageThought2008,
address = {Oxford},
title = {{{LOT}} 2: {{The}} Language of Thought Revisited},
isbn = {0-19-954877-3},
publisher = {{Oxford University Press}},
author = {Fodor, Jerry},
year = {2008},
file = {D:\\core\\reading\\Booknote\\LOT2.pdf}
}
@book{baayenAnalyzingLinguisticData2008,
edition = {1},
title = {Analyzing {{Linguistic Data}}: {{A}} Practical Introduction to Statistics Using {{R}}},
isbn = {978-0-521-70918-7},
publisher = {{Cambridge University Press}},
author = {Baayen, R. Harald},
month = mar,
year = {2008},
  keywords = {Mixed-effects models,theoretical quantiles,vocabulary growth,etymological age,complex synsets,data distribution object,inflectional entropy,tag trigrams,paired vectors,inanimate recipients,morphological family size,crossed random effects,restricted cubic spline},
file = {D:\\core\\reading\\Booknote\\baayenCUPstats.pdf}
}
@article{connellRepresentingObjectColour2007,
title = {Representing Object Colour in Language Comprehension},
volume = {102},
issn = {0010-0277},
  doi = {10.1016/j.cognition.2006.02.009},
abstract = {Embodied theories of cognition hold that mentally representing something red engages the neural subsystems that respond to environmental perception of that colour. This paper examines whether implicit perceptual information on object colour is represented during sentence comprehension even though doing so does not necessarily facilitate task performance. After reading a sentence that implied a particular colour for a given object, participants were presented with a picture of the object that either matched or mismatched the implied colour. When asked if the pictured object was mentioned in the preceding sentence, people's responses were faster when the colours mismatched than when they matched, suggesting that object colour is represented differently to other object properties such as shape and orientation. A distinction between stable and unstable embodied representations is proposed to allow embodied theories to account for these findings.},
journal = {Cognition},
author = {Connell, Louise},
month = mar,
year = {2007},
keywords = {embodied cognition,mental representation,Perception,Language comprehension,Colour,Stability},
pages = {476-485},
file = {D:\\core\\reading\\cognition\\1-s2.0-S0010027706000606-main.pdf}
}
@article{ZwaanLanguageComprehendersMentally2002,
title = {Language {{Comprehenders Mentally Represent}} the {{Shapes}} of {{Objects}}},
volume = {13},
doi = {10.1111/1467-9280.00430},
abstract = {We examined the prediction that people activate perceptual symbols during language comprehension. Subjects read sentences describing an animal or object in a certain location. The shape of the object or animal changed as a function of its location (e.g., eagle in the sky, eagle in a nest). However, this change was only implied by the sentences. After reading a sentence, subjects were presented with a line drawing of the object in question. They judged whether the object had been mentioned in the sentence (Experiment 1) or simply named the object (Experiment 2). In both cases, responses were faster when the pictured object's shape matched the shape implied by the sentence than when there was a mismatch. These results support the hypothesis that perceptual symbols are routinely activated in language comprehension.},
journal = {Psychological Science},
author = {Zwaan, Rolf A. and Stanfield, Robert A. and Yaxley, Richard H.},
month = mar,
year = {2002},
pages = {168-171},
file = {D:\\core\\reading\\psychological science\\Psychological Science-2002-Zwaan-168-71.pdf}
}
@article{GlenbergGroundinglanguageaction2002,
title = {Grounding Language in Action},
volume = {9},
issn = {1069-9384},
doi = {10.3758/bf03196313},
language = {English},
journal = {Psychonomic Bulletin \& Review},
  author = {Glenberg, Arthur M. and Kaschak, Michael P.},
month = sep,
year = {2002},
pages = {558-565},
file = {D:\\core\\reading\\Psychonomic B&R\\10.3758_BF03196313.pdf}
}
@article{barsalouPerceptualSymbolSystems1999,
title = {Perceptual Symbol Systems},
volume = {22},
doi = {10.1017/S0140525X99002149},
journal = {Behavioral and Brain Sciences},
author = {Barsalou, Lawrence W.},
year = {1999},
pages = {577-660},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\24WTCTI6\\Barsalou_BBS_1999_perceptual_symbol_systems.pdf}
}
@article{CohenMentalRotationMental1993,
title = {Mental {{Rotation}}, {{Mental Representation}}, and {{Flat Slopes}}},
volume = {25},
issn = {0010-0285},
doi = {10.1006/cogp.1993.1009},
abstract = {The "mental rotation" literature has studied how subjects determine whether two stimuli that differ in orientation have the same handedness. This literature implies that subjects perform the task by imagining the rotation of one of the stimuli to the orientation of the other. This literature has spawned several theories of mental representation. These theories imply that mental representations cannot be both orientation-free and handedness-specific. We present four experiments that demonstrate the contrary: mental representations can be both orientation-free and handedness-specific. In Experiment 1 we serendipitously discovered a version of R. N. Shepard and J. Metzler${'}$s (1971) "mental rotation" task in which subjects accurately discover the handedness of a stimulus without using "mental rotation," i.e., in which reaction time to compare the handedness of two forms is not a function of the angular disparity between the two forms. In Experiment 2 we generalize this finding to different experimental procedures. In Experiment 3 we replicate this finding with a much larger group of subjects. In Experiment 4 we show that when we preclude the formation of an orientation-free representation by never repeating a polygon, subjects carry out the handedness comparison task by performing "mental rotation."},
journal = {Cognitive Psychology},
author = {Cohen, D. and Kubovy, M.},
year = {1993},
pages = {351-382},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\PDADGINB\\Cohen and Kubovy 93.pdf}
}
@incollection{ZwaanEmbodiedsentencecomprehension2005,
address = {Cambridge, UK},
title = {Embodied Sentence Comprehension},
booktitle = {Grounding Cognition: {{The}} Role of Perception and Action in Memory, Language, and Thinking},
publisher = {{Cambridge University Press}},
author = {Zwaan, Rolf A. and Madden, Carol J.},
editor = {Pecher, Diane and Zwaan, Rolf A.},
year = {2005},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\DUNUV23R\\zwaan 05 bookgrounding cognition.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\EKJPXRAU\\zwaan 05 bookgrounding cognition.pdf}
}
@article{zwaanSeeingActingUnderstanding2006,
title = {Seeing, Acting, Understanding: {{Motor}} Resonance in Language Comprehension},
volume = {135},
copyright = {(c) 2012 APA, all rights reserved},
  issn = {1939-2222 (Electronic) 0096-3445 (Print)},
shorttitle = {Seeing, Acting, Understanding},
doi = {10.1037/0096-3445.135.1.1},
abstract = {Observing actions and understanding sentences about actions activates corresponding motor processes in the observer-comprehender. In 5 experiments, the authors addressed 2 novel questions regarding language-based motor resonance. The 1st question asks whether visual motion that is associated with an action produces motor resonance in sentence comprehension. The 2nd question asks whether motor resonance is modulated during sentence comprehension. The authors' experiments provide an affirmative response to both questions. A rotating visual stimulus affects both actual manual rotation and the comprehension of manual rotation sentences. Motor resonance is modulated by the linguistic input and is a rather immediate and localized phenomenon. The results are discussed in the context of theories of action observation and mental simulation.},
number = {1},
journal = {Journal of Experimental Psychology: General},
author = {Zwaan, Rolf A. and Taylor, Lawrence J.},
year = {2006},
keywords = {*Language,*Comprehension,*Motor Processes,*Visual Stimulation,Sentences},
pages = {1-11},
file = {D:\\core\\reading\\JEP_G\\JEPGen_06.pdf}
}
@article{MathotOpenSesameopensourcegraphical2011,
title = {{{OpenSesame}}: {{An}} Open-Source, Graphical Experiment Builder for the Social Sciences},
volume = {44},
issn = {1554-351X},
shorttitle = {{{OpenSesame}}},
doi = {10.3758/s13428-011-0168-7},
abstract = {In the present article, we introduce OpenSesame, a graphical experiment builder for the social sciences. OpenSesame is free, open-source, and cross-platform. It features a comprehensive and intuitive graphical user interface and supports Python scripting for complex tasks. Additional functionality, such as support for eyetrackers, input devices, and video playback, is available through plug-ins. OpenSesame can be used in combination with existing software for creating experiments.},
number = {2},
journal = {Behavior Research Methods},
author = {Math\^ot, Sebastiaan and Schreij, Daniel and Theeuwes, Jan},
month = nov,
year = {2011},
keywords = {Cognitive Psychology,Software,Stimulus presentation,Experiment builder,Python,Graphical user interface},
pages = {314-324},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\5G9K9WSQ\\Mathôt 等。 - 2011 - OpenSesame An open-source, graphical experiment b.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\H6GT5WQZ\\Mathôt 等。 - 2011 - OpenSesame An open-source, graphical experiment b.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\AN2I2W4M\\10.html},
pmid = {22083660},
pmcid = {PMC3356517}
}
@article{LiERPsStudyMental2017,
title = {{An ERPs Study on the Mental Simulation of Implied Object Color Information during Chinese Sentence Comprehension}},
volume = {40},
doi = {10.16719/j.cnki.1671-6981.20170105},
  abstract = {Embodied cognition theory assumes that during sentence comprehension readers activate perceptual mental representations of what they read, such as object shape and orientation. Some previous studies of implied object color information in language comprehension support this hypothesis, whereas others do not. The current research investigated the mental simulation of implied object color during Chinese sentence reading, taking the prototypicality of the object color into account. The experiment used a 2\texttimes{}2 within-subjects design. One independent variable was the object color implied in the sentence (the object's typical or atypical color); the other was the sentence-picture matching condition (match or mismatch). The dependent variables were reaction times and brain electrical activity (with the N400 as index) when participants judged pictures after reading sentences. Participants first read sentences implying a typical or atypical color for an object and then judged whether the pictured object had been mentioned in the preceding sentence. Reaction times were analyzed with 2\texttimes{}2 repeated measures ANOVAs. The main effect of sentence version was significant: responses were faster after sentences implying typical colors than after sentences implying atypical colors. The main effect of sentence-picture matching was also significant: responses were faster in the matched than in the mismatched condition. The interaction was not significant. ERP data were analyzed with 2\texttimes{}5\texttimes{}5 repeated measures ANOVAs, separately for typical-color and atypical-color sentences, with sentence-picture matching condition (match or mismatch), brain posteriority (frontal, fronto-central, central, centro-parietal, parietal), and hemisphere laterality (left temporal, left, midline, right, right temporal) as factors. After sentences implying typical colors, the matching effect was significant in the 300-500 ms interval: N400 peaks were more negative in the mismatch than in the match condition. After sentences implying atypical colors, this effect was not significant, and N400 peaks were similar across conditions. Together, the behavioral and ERP findings suggest that readers activate perceptual representations of objects and their properties during Chinese sentence comprehension, even when those properties are not mentioned directly. Moreover, the prototypicality of the implied color determines which representations are activated: sentences implying a typical color activate only that color, whereas sentences implying an atypical color activate both the typical and the atypical color, with different consequences for comprehension and related processing.},
  language = {Chinese},
number = {1},
journal = {Journal of Psychological Science},
author = {Li, Ying and Shang, Lingling},
year = {2017},
keywords = {embodied cognition,event-related potentials,Mental simulation,Chinese sentence comprehension,Color prototypicality},
pages = {29-36},
file = {D:\\core\\reading\\psychological science(China)\\psysci16-092.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\H6RA83Z7\\abstract9822.html}
}
@article{pecherLanguageComprehendersRetain2009,
title = {Language Comprehenders Retain Implied Shape and Orientation of Objects},
volume = {62},
issn = {1747-0218},
doi = {10.1080/17470210802633255},
abstract = {According to theories of embodied cognition, language comprehenders simulate sensorimotor experiences to represent the meaning of what they read. Previous studies have shown that picture recognition is better if the object in the picture matches the orientation or shape implied by a preceding sentence. In order to test whether strategic imagery may explain previous findings, language comprehenders first read a list of sentences in which objects were mentioned. Only once the complete list had been read was recognition memory tested with pictures. Recognition performance was better if the orientation or shape of the object matched that implied by the sentence, both immediately after reading the complete list of sentences and after a 45-min delay. These results suggest that previously found match effects were not due to strategic imagery and show that details of sensorimotor simulations are retained over longer periods.},
number = {6},
journal = {The Quarterly Journal of Experimental Psychology},
author = {Pecher, Diane and {van Dantzig}, Saskia and Zwaan, Rolf A. and Zeelenberg, Ren\'e},
month = jun,
year = {2009},
pages = {1108-1114},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\WS6MCIE8\\QJEP09PecherVanDantzigZwaanZeelenberg.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\VHCVHTS3\\17470210802633255.html}
}
@article{stanfield_effect_2001,
title = {The Effect of Implied Orientation Derived from Verbal Context on Picture Recognition},
volume = {12},
issn = {0956-7976},
doi = {10.1111/1467-9280.00326},
abstract = {Perceptual symbol systems assume an analogue relationship between a symbol and its referent, whereas amodal symbol systems assume an arbitrary relationship between a symbol and its referent. According to perceptual symbol theories, the complete representation of an object, called a simulation, should reflect physical characteristics of the object. Amodal theories, in contrast, do not make this prediction. We tested the hypothesis, derived from perceptual symbol theories, that people mentally represent the orientation of an object implied by a verbal description. Orientation (vertical-horizontal) was manipulated by having participants read a sentence that implicitly suggested a particular orientation for an object. Then recognition latencies to pictures of the object in each of the two orientations were measured. Pictures matching the orientation of the object implied by the sentence were responded to faster than pictures that did not match the orientation. This finding is interpreted as offering support for theories positing perceptual symbol systems.},
language = {eng},
number = {2},
journal = {Psychological Science},
author = {Stanfield, Robert A. and Zwaan, Rolf A.},
month = mar,
year = {2001},
keywords = {Memory,Perception,Adult,Female,Humans,Male,Association Learning,Models; Psychological,Cues,Recognition (Psychology),Form Perception},
pages = {153-156},
file = {D:\\core\\reading\\psychological science\\Psychological Science-2001-Stanfield-153-6.pdf},
pmid = {11340925}
}
@article{coltheart_modularity_1999,
title = {Modularity and Cognition},
volume = {3},
issn = {1364-6613},
doi = {10.1016/S1364-6613(99)01289-9},
abstract = {Modularity is a concept central to cognitive science, and Fodor's analysis of cognitive modularity in his book The Modularity Of Mind has been widely influential \textendash{} but also widely misunderstood. It is often claimed that the possession of some or other system-property is a necessary condition for that system to be modular in Fodor's sense, but Fodor made it clear that he was not proposing a definition of modularity, nor proposing any necessary conditions for the applicability of the term. He was simply suggesting a number of system properties that are typical of modular systems. I argue that it is nevertheless possible to derive a useful definition of modularity from the kinds of arguments put forward by Fodor: A cognitive system is modular when and only when it is domain-specific. Given any such proposed module, the other features of modularity discussed by Fodor should be dealt with as empirical issues: for each feature (innateness, for example), it is an empirical question whether or not the proposed module has that feature.},
number = {3},
journal = {Trends in Cognitive Sciences},
author = {Coltheart, Max},
month = mar,
year = {1999},
keywords = {Cognition,Modularity,Fodor,Information encapsulation,Mind},
pages = {115-120},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\TP99ZUIM\\Coltheart - 1999 - Modularity and cognition.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\D9NZVGKF\\S1364661399012899.html}
}
@article{brodeurBankStandardizedStimuli2014,
title = {Bank of {{Standardized Stimuli}} ({{BOSS}}) {{Phase II}}: 930 {{New Normative Photos}}},
volume = {9},
shorttitle = {Bank of {{Standardized Stimuli}} ({{BOSS}}) {{Phase II}}},
doi = {10.1371/journal.pone.0106953},
abstract = {Researchers have only recently started to take advantage of the developments in technology and communication for sharing data and documents. However, the exchange of experimental material has not taken advantage of this progress yet. In order to facilitate access to experimental material, the Bank of Standardized Stimuli (BOSS) project was created as a free standardized set of visual stimuli accessible to all researchers, through a normative database. The BOSS is currently the largest existing photo bank providing norms for more than 15 dimensions (e.g. familiarity, visual complexity, manipulability, etc.), making the BOSS an extremely useful research tool and a mean to homogenize scientific data worldwide. The first phase of the BOSS was completed in 2010, and contained 538 normative photos. The second phase of the BOSS project presented in this article, builds on the previous phase by adding 930 new normative photo stimuli. New categories of concepts were introduced, including animals, building infrastructures, body parts, and vehicles and the number of photos in other categories was increased. All new photos of the BOSS were normalized relative to their name, familiarity, visual complexity, object agreement, viewpoint agreement, and manipulability. The availability of these norms is a precious asset that should be considered for characterizing the stimuli as a function of the requirements of research and for controlling for potential confounding effects.},
number = {9},
journal = {PLoS ONE},
author = {Brodeur, Mathieu B. and Gu\'erard, Katherine and Bouras, Maria},
month = sep,
year = {2014},
pages = {e106953},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\8PIXBP23\\Brodeur 等。 - 2014 - Bank of Standardized Stimuli (BOSS) Phase II 930 .pdf}
}
@article{boninNewSet2992003,
title = {A New Set of 299 Pictures for Psycholinguistic Studies: {{French}} Norms for Name Agreement, Image Agreement, Conceptual Familiarity, Visual Complexity, Image Variability, Age of Acquisition, and Naming Latencies},
volume = {35},
issn = {0743-3808, 1532-5970},
shorttitle = {A New Set of 299 Pictures for Psycholinguistic Studies},
doi = {10.3758/BF03195507},
abstract = {Pictures are often used as stimuli in studies of perception, language, and memory. Since performances on different sets of pictures are generally contrasted, stimulus selection requires the use of standardized material to match pictures across different variables. Unfortunately, the number of standardized pictures available for empirical research is rather limited. The aim of the present study is to provide French normative data for a new set of 299 black-and-white drawings. Alario and Ferrand (1999) were closely followed in that the pictures were standardized on six variables: name agreement, image agreement, conceptual familiarity, visual complexity, image variability, and age of acquisition. Objective frequency measures are also provided for the most common names associated with the pictures. Comparative analyses between our results and the norms obtained in other, similar studies are reported. Finally, naming latencies corresponding to the set of pictures were also collected from French native speakers, and correlational/multiple-regression analyses were performed on naming latencies. This new set of standardized pictures is available on the Internet (http://leadserv.u-bourgogne.fr/bases/pictures/) and should be of great use to researchers when they select pictorial stimuli.},
language = {en},
number = {1},
journal = {Behavior Research Methods, Instruments, \& Computers},
author = {Bonin, Patrick and Peereman, Ronald and Malardier, Nathalie and M\'eot, Alain and Chalard, Maryl\`ene},
month = feb,
year = {2003},
keywords = {Behavioral Science,Cognitive Psychology},
pages = {158-167},
file = {D:\\core\\reading\\Behavior Research Methods, Instruments, & Computers\\Bonin-BRM-2003.zip;D:\\core\\Version_Controls\\zotero_data\\storage\\DP9ABZI8\\Bonin 等。 - 2003 - A new set of 299 pictures for psycholinguistic stu.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\KET94KP2\\art%3A10.3758%2FBF03195507.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\RANIUXQX\\10.html}
}
@incollection{connellColourStabilityEmbodied2005,
  address = {Mahwah, NJ},
  publisher = {{Lawrence Erlbaum}},
title = {Colour and Stability in Embodied Representations},
booktitle = {Proceedings of the Twenty-Seventh Annual Conference of the Cognitive Science Society.},
author = {Connell, Louise},
  editor = {Bara, B. and Barsalou, Lawrence W. and Bucciarelli, M.},
year = {2005},
pages = {482-487},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\D5F89FRF\\Connell-2005-CogSci.pdf}
}
@article{eerlandRegisteredReplicationReport2016,
  title = {Registered {{Replication Report}}: {{Hart}} \& {{Albarrac\'in}} (2011)},
volume = {11},
issn = {1745-6916, 1745-6924},
shorttitle = {Registered {{Replication Report}}},
doi = {10.1177/1745691615605826},
  abstract = {Language can be viewed as a complex set of cues that shape people's mental representations of situations. For example, people think of behavior described using imperfective aspect (i.e., what a person was doing) as a dynamic, unfolding sequence of actions, whereas the same behavior described using perfective aspect (i.e., what a person did) is perceived as a completed whole. A recent study found that aspect can also influence how we think about a person's intentions (Hart \& Albarrac\'in, 2011). Participants judged actions described in imperfective as being more intentional (d between 0.67 and 0.77) and they imagined these actions in more detail (d = 0.73). The fact that this finding has implications for legal decision making, coupled with the absence of other direct replication attempts, motivated this registered replication report (RRR). Multiple laboratories carried out 12 direct replication studies, including one MTurk study. A meta-analysis of these studies provides a precise estimate of the size of this effect free from publication bias. This RRR did not find that grammatical aspect affects intentionality (d between 0 and -0.24) or imagery (d = -0.08). We discuss possible explanations for the discrepancy between these results and those of the original study.},
language = {en},
number = {1},
journal = {Perspectives on Psychological Science},
author = {Eerland, Anita and Sherrill, A. M. and Magliano, Joseph P. and Zwaan, Rolf A.},
month = jan,
year = {2016},
pages = {158-171},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\DBWS3S6T\\Perspectives_on_Psychological_Science_20.pdf}
}
@article{winterLanguageComprehendersRepresent2012,
title = {Language Comprehenders Represent Object Distance Both Visually and Auditorily},
volume = {4},
issn = {1866-9859},
doi = {10.1515/langcog-2012-0001},
abstract = {When they process sentences, language comprehenders activate perceptual and motor representations of described scenes. On the ``immersed experiencer'' account, comprehenders engage motor and perceptual systems to create experiences that someone participating in the described scene would have. We tested two predictions of this view. First, the distance of mentioned objects from the protagonist of a described scene should produce perceptual correlates in mental simulations. And second, mental simulation of perceptual features should be multimodal, like actual perception of such features. In Experiment 1, we found that language about objects at different distances modulated the size of visually simulated objects. In Experiment 2, we found a similar effect for volume in the auditory modality. These experiments lend support to the view that language-driven mental simulation encodes experiencer-specific spatial details. The fact that we obtained similar simulation effects for two different modalities\textemdash{}audition and vision\textemdash{}confirms the multimodal nature of mental simulations during language understanding.},
number = {01},
journal = {Language and Cognition},
author = {Winter, Bodo and Bergen, Benjamin},
month = mar,
year = {2012},
  pages = {1-16},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\BPJRFUSV\\Winter 與 Bergen - 2012 - Language comprehenders represent object distance b.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\7MTUQBZW\\displayAbstract.html}
}
@article{EngelenPerceptualsimulationdeveloping2011,
title = {Perceptual Simulation in Developing Language Comprehension},
volume = {110},
issn = {0022-0965},
doi = {10.1016/j.jecp.2011.06.009},
abstract = {We tested an embodied account of language proposing that comprehenders create perceptual simulations of the events they hear and read about. In Experiment 1, children (ages 7\textendash{}13 years) performed a picture verification task. Each picture was preceded by a prerecorded spoken sentence describing an entity whose shape or orientation matched or mismatched the depicted object. Responses were faster for matching pictures, suggesting that participants had formed perceptual-like situation models of the sentences. The advantage for matching pictures did not increase with age. Experiment 2 extended these findings to the domain of written language. Participants (ages 7\textendash{}10 years) of high and low word reading ability verified pictures after reading sentences aloud. The results suggest that even when reading is effortful, children construct a perceptual simulation of the described events. We propose that perceptual simulation plays a more central role in developing language comprehension than was previously thought.},
number = {4},
journal = {Journal of Experimental Child Psychology},
author = {Engelen, Jan A. A. and Bouwmeester, Samantha and {de Bruin}, Anique B. H. and Zwaan, Rolf A.},
month = dec,
year = {2011},
keywords = {embodied cognition,Word reading,Reading comprehension,Language comprehension,Perceptual simulation,Language development},
pages = {659-675},
file = {D:\\core\\reading\\unsort\\Engelen 等。 - 2011 - Perceptual simulation in developing language compr.pdf}
}
@article{yaxleySimulatingVisibilityLanguage2007,
title = {Simulating Visibility during Language Comprehension},
volume = {105},
issn = {0010-0277},
doi = {10.1016/j.cognition.2006.09.003},
abstract = {In this study, participants performed a sentence\textendash{}picture verification task in which they read sentences about an agent viewing an object (e.g., moose) through a differentially occlusive medium (e.g., clean vs. fogged goggles), and then verified whether a subsequently pictured object was mentioned in the previous sentence. Picture verification latencies were shorter when the resolution of the pictured object and the resolution implied by the sentence matched than when they did not. These results suggest that the degree of visibility implied in linguistic context can influence immediate object interpretation. These data suggest that readers mentally simulate the visibility of objects during language comprehension. Thus, the simulation of linguistic descriptions is not limited to the activation of intrinsic object properties (e.g., object shape), but also invokes the perceptibility of referential objects given implied environmental context.},
number = {1},
journal = {Cognition},
author = {Yaxley, Richard H. and Zwaan, Rolf A.},
month = oct,
year = {2007},
keywords = {Language comprehension,Perceptual representations,Mental simulation,Object visibility},
pages = {229-236},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\E4NG8FXI\\Yaxley 與 Zwaan - 2007 - Simulating visibility during language comprehensio.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\NMVR34RK\\S0010027706001971.html}
}
@article{morey_bayes_2011,
title = {Bayes Factor Approaches for Testing Interval Null Hypotheses},
volume = {16},
issn = {1939-1463},
doi = {10.1037/a0024377},
abstract = {Psychological theories are statements of constraint. The role of hypothesis testing in psychology is to test whether specific theoretical constraints hold in data. Bayesian statistics is well suited to the task of finding supporting evidence for constraint, because it allows for comparing evidence for 2 hypotheses against each another. One issue in hypothesis testing is that constraints may hold only approximately rather than exactly, and the reason for small deviations may be trivial or uninteresting. In the large-sample limit, these uninteresting, small deviations lead to the rejection of a useful constraint. In this article, we develop several Bayes factor 1-sample tests for the assessment of approximate equality and ordinal constraints. In these tests, the null hypothesis covers a small interval of non-0 but negligible effect sizes around 0. These Bayes factors are alternatives to previously developed Bayes factors, which do not allow for interval null hypotheses, and may especially prove useful to researchers who use statistical equivalence testing. To facilitate adoption of these Bayes factor tests, we provide easy-to-use software.},
language = {eng},
number = {4},
journal = {Psychological Methods},
author = {Morey, Richard D. and Rouder, Jeffrey N.},
month = dec,
year = {2011},
keywords = {Psychology,Statistics as Topic,Research design,Bayes Theorem,Data Interpretation; Statistical},
pages = {406-419},
file = {D:\\core\\reading\\PsyMethods\\0104066.pdf},
pmid = {21787084}
}
@article{dekoningSizeDoesMatter2017,
title = {Size {{Does Matter}}: {{Implied Object Size}} Is {{Mentally Simulated During Language Comprehension}}},
volume = {54},
issn = {0163-853X},
shorttitle = {Size {{Does Matter}}},
doi = {10.1080/0163853X.2015.1119604},
abstract = {Embodied theories of language comprehension propose that readers construct a mental simulation of described objects that contains perceptual characteristics of their real-world referents. The present study is the first to investigate directly whether implied object size is mentally simulated during sentence comprehension and to study the potential influence of developmental factors on mental simulation by comparing adults' and children's mental simulation processing. Participants performed a sentence-picture verification task in which they read a sentence that implied a large or a small size for an object and then saw a picture of the object that matched or mismatched the implied size. Responses to pictures were faster when implied size and pictured size matched, suggesting that readers activated perceptual information on object size during sentence comprehension. The magnitude of the match effect was equal across age groups. The results contribute to refining and advancing knowledge with respect to the nature of mental simulations.},
journal = {Discourse Processes},
  author = {{de Koning}, Bj\"orn B. and Wassenburg, Stephanie I. and Bos, Lisanne T. and {van der Schoot}, Menno},
year = {2017},
pages = {493-503},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\RRPNAK8K\\Koning 等。 - 2016 - Size Does Matter Implied Object Size is Mentally .pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\AQZ2ED8P\\0163853X.2015.html}
}
@article{wagenmakersRegisteredReplicationReport2016,
  title = {Registered {{Replication Report}}: {{Strack}}, {{Martin}}, \& {{Stepper}} (1988)},
issn = {1745-6916, 1745-6924},
doi = {10.1177/1745691616674458},
abstract = {According to the facial feedback hypothesis, people's affective responses can be influenced by their own facial expression (e.g., smiling, pouting), even when their expression did not result from their emotional experiences. For example, Strack, Martin, and Stepper (1988) instructed participants to rate the funniness of cartoons using a pen that they held in their mouth. In line with the facial feedback hypothesis, when participants held the pen with their teeth (inducing a ``smile''), they rated the cartoons as funnier than when they held the pen with their lips (inducing a ``pout''). This seminal study of the facial feedback hypothesis has not been replicated directly. This Registered Replication Report describes the results of 17 independent direct replications of Study 1 from Strack et al. (1988), all of which followed the same vetted protocol. A meta-analysis of these studies examined the difference in funniness ratings between the ``smile'' and ``pout'' conditions. The original Strack et al. (1988) study reported a rating difference of 0.82 units on a 10-point Likert scale. Our meta-analysis revealed a rating difference of 0.03 units with a 95\% confidence interval ranging from -0.11 to 0.16.},
language = {en},
journal = {Perspectives on Psychological Science},
author = {Wagenmakers, E.-J. and Beek, Titia and Dijkhoff, Laura and Gronau, Quentin F. and Acosta, A. and Adams, R. B. and Albohn, D. N. and Allard, E. S. and Benning, S. D. and {Blouin-Hudon}, E.-M. and Bulnes, L. C. and Caldwell, T. L. and {Calin-Jageman}, R. J. and Capaldi, C. A. and Carfagno, N. S. and Chasten, K. T. and Cleeremans, A. and Connell, L. and DeCicco, J. M. and Dijkstra, K. and Fischer, A. H. and Foroni, F. and Hess, U. and Holmes, K. J. and Jones, J. L. H. and Klein, O. and Koch, C. and Korb, S. and Lewinski, P. and Liao, J. D. and Lund, S. and Lupi\'a\~nez, J. and Lynott, D. and Nance, C. N. and Oosterwijk, S. and \"Ozdo{\u g}ru, A. A. and {Pacheco-Unguetti}, A. P. and Pearson, B. and Powis, C. and Riding, S. and Roberts, T.-A. and Rumiati, R. I. and Senden, M. and {Shea-Shumsky}, N. B. and Sobocko, K. and Soto, J. A. and Steiner, T. G. and Talarico, J. M. and van Allen, Z. M. and Vandekerckhove, M. and Wainwright, B. and Wayand, J. F. and Zeelenberg, R. and Zetzer, E. E. and Zwaan, R. A.},
month = oct,
year = {2016},
keywords = {replication,facial feedback hypothesis,many-labs,preregistration},
pages = {1745691616674458},
file = {D:\\core\\reading\\psychological science\\Wagenmakers 等。 - 2016 - Registered Replication Report Strack, Martin, & St.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\QHJQTV6W\\1745691616674458.html},
pmid = {27784749}
}
@article{PingUnderstandinggesturelistener2014,
title = {Understanding Gesture: {{Is}} the Listener's Motor System Involved?},
volume = {143},
issn = {0096-3445},
shorttitle = {Understanding Gesture},
doi = {10.1037/a0032246},
abstract = {Listeners are able to glean information from the gestures that speakers produce, seemingly without conscious awareness. However, little is known about the mechanisms that underlie this process. Research on human action understanding shows that perceiving another's actions results in automatic activation of the motor system in the observer, which then affects the observer's understanding of the actor's goals. We ask here whether perceiving another's gesture can similarly result in automatic activation of the motor system in the observer. In Experiment 1, we first establish a new procedure that uses listener response times to study how gesture impacts sentence comprehension. In Experiment 2, we use this procedure, in conjunction with a secondary motor task, to investigate whether the listener's motor system is involved in this process. We show that moving arms and hands (but not legs and feet) interferes with the listener's ability to use information conveyed in a speaker's hand gestures. Our data thus suggest that understanding gesture relies, at least in part, on the listener's own motor system.},
number = {1},
  journal = {Journal of Experimental Psychology: General},
author = {Ping, Raedy M. and {Goldin-Meadow}, Susan and Beilock, Sian L.},
month = feb,
year = {2014},
pages = {195-204},
file = {D:\\core\\reading\\JEP_G\\Ping 等。 - 2014 - Understanding gesture Is the listener’s motor sys.pdf},
pmid = {23565671},
pmcid = {PMC3759547}
}
@article{ViechtbauerConductingMetaAnalysesmetafor2010,
title = {Conducting {{Meta}}-{{Analyses}} in {{R}} with the Metafor {{Package}}},
volume = {36},
issn = {1548-7660},
doi = {10.18637/jss.v036.i03},
abstract = {The metafor package provides functions for conducting meta-analyses in R. The package includes functions for fitting the meta-analytic fixed- and random-effects models and allows for the inclusion of moderators variables (study-level covariates) in these models. Meta-regression analyses with continuous and categorical moderators can be conducted in this way. Functions for the Mantel-Haenszel and Peto's one-step method for meta-analyses of 2 x 2 table data are also available. Finally, the package provides various plot functions (for example, for forest, funnel, and radial plots) and functions for assessing the model fit, for obtaining case diagnostics, and for tests of publication bias.},
number = {1},
journal = {Journal of Statistical Software},
author = {Viechtbauer, Wolfgang},
year = {2010},
  pages = {1-48},
file = {D:\\core\\reading\\unsort\\v36i03.pdf}
}
@article{rouderBayesianTestsAccepting2009,
title = {Bayesian t Tests for Accepting and Rejecting the Null Hypothesis},
volume = {16},
issn = {1069-9384, 1531-5320},
doi = {10.3758/PBR.16.2.225},
abstract = {Progress in science often comes from discovering invariances in relationships among variables; these invariances often correspond to null hypotheses. As is commonly known, it is not possible to state evidence for the null hypothesis in conventional significance testing. Here we highlight a Bayes factor alternative to the conventional t test that will allow researchers to express preference for either the null hypothesis or the alternative. The Bayes factor has a natural and straightforward interpretation, is based on reasonable assumptions, and has better properties than other methods of inference that have been advocated in the psychological literature. To facilitate use of the Bayes factor, we provide an easy-to-use, Web-based program that performs the necessary calculations.},
language = {en},
number = {2},
journal = {Psychonomic Bulletin \& Review},
author = {Rouder, Jeffrey N. and Speckman, Paul L. and Sun, Dongchu and Morey, Richard D. and Iverson, Geoffrey},
month = apr,
year = {2009},
keywords = {Bayes factor,BFplus0},
pages = {225-237},
file = {D:\\core\\reading\\Psychonomic B&R\\art%3A10.3758%2FPBR.16.2.225.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\JN7DAD5E\\Rouder 等。 - 2009 - Bayesian t tests for accepting and rejecting the n.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\K55VTGNQ\\PBR.16.2.html}
}
@article{shepardMentalRotationThreedimensional1971,
title = {Mental Rotation of Three-Dimensional Objects},
volume = {171},
journal = {Science},
author = {Shepard, R. N. and Metzler, J.},
year = {1971},
pages = {701-703},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\TIIX5K6D\\mental-rotation.pdf}
}
@article{kleinerWhatNewPsychtoolbox32007,
title = {What's New in {{Psychtoolbox}}-3?},
volume = {36},
journal = {Perception},
author = {Kleiner, M and Brainard, D and Pelli, D},
year = {2007}
}
@article{wichertsDegreesFreedomPlanning2016,
title = {Degrees of {{Freedom}} in {{Planning}}, {{Running}}, {{Analyzing}}, and {{Reporting Psychological Studies}}: {{A Checklist}} to {{Avoid}} p-{{Hacking}}},
volume = {7},
issn = {1664-1078},
shorttitle = {Degrees of {{Freedom}} in {{Planning}}, {{Running}}, {{Analyzing}}, and {{Reporting Psychological Studies}}},
doi = {10.3389/fpsyg.2016.01832},
abstract = {The designing, collecting, analyzing, and reporting of psychological studies entail many choices that are often arbitrary. The opportunistic use of these so-called researcher degrees of freedom aimed at obtaining statistically significant results is problematic because it enhances the chances of false positive results and may inflate effect size estimates. In this review article, we present an extensive list of 34 degrees of freedom that researchers have in formulating hypotheses, and in designing, running, analyzing, and reporting of psychological research. The list can be used in research methods education, and as a checklist to assess the quality of preregistrations and to determine the potential for bias due to (arbitrary) choices in unregistered studies.},
language = {English},
journal = {Frontiers in Psychology},
author = {Wicherts, Jelte M. and Veldkamp, Coosje L. S. and Augusteijn, Hilde E. M. and Bakker, Marjan and Van Aert, Robbie C. M. and Van Assen, Marcel},
year = {2016},
keywords = {significance testing,Questionable research practices,p-hacking,Experimental design (study designs),Bias,significance chasing,Research methods education},
file = {D:\\core\\reading\\Frontiers\\Wicherts 等。 - 2016 - Degrees of Freedom in Planning, Running, Analyzing.pdf}
}
@article{greenSIMRPackagePower2016,
title = {{{SIMR}}: An {{R}} Package for Power Analysis of Generalized Linear Mixed Models by Simulation},
volume = {7},
issn = {2041-210X},
shorttitle = {{{SIMR}}},
doi = {10.1111/2041-210X.12504},
abstract = {The R package simr allows users to calculate power for generalized linear mixed models from the lme4 package. The power calculations are based on Monte Carlo simulations. It includes tools for (i) running a power analysis for a given model and design, and (ii) calculating power curves to assess trade-offs between power and sample size. This paper presents a tutorial using a simple example of count data with mixed effects (with structure representative of environmental monitoring data) to guide the user along a gentle learning curve, adding only a few commands or options at a time.},
language = {en},
number = {4},
journal = {Methods in Ecology and Evolution},
author = {Green, Peter and MacLeod, Catriona J.},
month = apr,
year = {2016},
keywords = {Experimental design,sample size,random effects,Monte Carlo,glmm,type II error},
pages = {493-498},
file = {D:\\core\\reading\\unsort\\Green 與 MacLeod - 2016 - SIMR an R package for power analysis of generaliz.pdf;D:\\core\\reading\\unsort\\mee312504-sup-0003-AppendixS3.pdf}
}
@book{jeffreysTheoryProbability1961,
address = {Oxford; New York},
edition = {3rd},
series = {Oxford Classic Texts in the Physical Sciences},
title = {Theory of Probability},
isbn = {978-0-19-850368-2},
lccn = {QA273 .J4 1998},
publisher = {{Clarendon Press ; Oxford University Press}},
author = {Jeffreys, Harold},
year = {1961},
keywords = {Bayes,Probabilities}
}
@article{SeticNumericalCongruencyEffect2017,
title = {Numerical {{Congruency Effect}} in the {{Sentence}}-{{Picture Verification Task}}},
volume = {64},
issn = {1618-3169, 2190-5142},
doi = {10.1027/1618-3169/a000358},
abstract = {In two experiments, we showed that irrelevant numerical information influenced the speed of sentence-picture verification. Participants were asked to verify whether the concept mentioned in a sentence matched the object presented in a subsequent picture. Concurrently, the number word attached to the concept in the sentence and the quantity of objects presented in the picture were manipulated (numerical congruency). The number of objects varied from one to four. In Experiment 1, participants read statements such as three dogs. In Experiment 2, they read sentences such as three dogs were wandering in the street. In both experiments, the verification speed revealed the interaction between response and numerical congruency. The verification times for concept-object match were faster when there was also numerical congruence (compared with incongruence) between the number word and quantity. On the other hand, there was no difference between numerical congruence and incongruence when the concept and object mismatched. The results are interpreted as evidence for the symbol grounding of number words in perceptual representation of small quantities, that is, quantities falling in the subitization range.},
language = {en},
number = {3},
journal = {Experimental Psychology},
author = {{\v S}eti\'c, Mia and Domijan, Dra{\v z}en},
month = may,
year = {2017},
pages = {159-169},
file = {D:\\core\\reading\\unsort\\1618-3169_a000358.pdf}
}
@article{schonbrodt_sequential_2017,
title = {Sequential Hypothesis Testing with {{Bayes}} Factors: {{Efficiently}} Testing Mean Differences.},
volume = {22},
issn = {1939-1463, 1082-989X},
shorttitle = {Sequential Hypothesis Testing with {{Bayes}} Factors},
doi = {10.1037/met0000061},
language = {en},
number = {2},
journal = {Psychological Methods},
author = {Sch\"onbrodt, Felix D. and Wagenmakers, Eric-Jan and Zehetleitner, Michael and Perugini, Marco},
year = {2017},
pages = {322-339},
file = {D:\\core\\reading\\unsort\\SBF_final.pdf}
}
@techreport{richard_morey_bayesfactor:_2015,
title = {{{BayesFactor}}: 0.9.12-2 {{CRAN}}},
shorttitle = {{{BayesFactor}}},
abstract = {CHANGES IN BayesFactor VERSION 0.9.12-2: Added feature allowing fine-tuning of priors on a per-effect basis (see new argument rscaleEffects of lmBF, anovaBF, and generalTestBF); fixed bug that disallowed logical indexing of probability objects; fixed minor typos in documentation; fixed bug causing regression Bayes factors to fail for very small $R^2$; fixed bug disallowing expansion of dot (.) in generalTestBF model specifications; fixed bug preventing cancelling of all analyses with interrupt; restricted contingency prior to values $\geq$ 1; all BFmodel objects have an additional ``analysis'' slot giving details of the analysis.},
institution = {{Zenodo}},
author = {Morey, Richard D. and Rouder, Jeffrey N. and Love, Jonathon and Marwick, Ben},
month = sep,
year = {2015},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\R9MZZEIA\\31202.html},
doi = {10.5281/zenodo.31202}
}
@article{koning_mental_2017,
title = {Mental Simulation of Four Visual Object Properties: Similarities and Differences as Assessed by the Sentence\textendash{}Picture Verification Task},
volume = {29},
issn = {2044-5911},
shorttitle = {Mental Simulation of Four Visual Object Properties},
doi = {10.1080/20445911.2017.1281283},
abstract = {In the sentence\textendash{}picture verification (SPV) task, people read sentences implying the shape/size/colour/orientation of objects. They then verify whether pictured objects, which either match or mismatch the implied visual information, were mentioned in the sentence. Faster verification times on matching trials (match advantage) are considered supportive to the notion that readers perform mental simulations during sentence comprehension. This study advances this work by applying a within-subjects design to the SPV-task, enabling us to directly address the strength of and correlation between the match advantages for the properties shape, size, colour, and orientation. Results showed varying match advantages with colour showing the strongest effect, and no match advantage for orientation. Shape, size, and colour were significantly correlated, whereas there were no significant correlations with orientation. These findings suggest that interpretations of match advantages could benefit from a re-evaluation of mental simulation accounts by distinguishing between intrinsic (shape, size, and colour) and extrinsic (orientation) object properties.},
number = {4},
journal = {Journal of Cognitive Psychology},
author = {{de Koning}, Bj\"orn B. and Wassenburg, Stephanie I. and Bos, Lisanne T. and van der Schoot, Menno},
month = may,
year = {2017},
keywords = {embodied cognition,Reading comprehension,Mental simulation,sentence-picture verification task},
pages = {420-432},
file = {D:\\core\\reading\\unsort\\Manuscript_De Koning, Wassenburg, Bos & van der Schoot (submitted).pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\4WX6EGU9\\20445911.2017.html;D:\\core\\Version_Controls\\zotero_data\\storage\\IDUIM5J7\\20445911.2017.html}
}
@article{parsons_use_1995,
title = {Use of Implicit Motor Imagery for Visual Shape Discrimination as Revealed by {{PET}}},
volume = {375},
issn = {0028-0836, 1476-4687},
doi = {10.1038/375054a0},
language = {en},
number = {6526},
journal = {Nature},
author = {Parsons, Lawrence M. and Fox, Peter T. and Downs, J. Hunter and Glass, Thomas and Hirsch, Traci B. and Martin, Charles C. and Jerabek, Paul A. and Lancaster, Jack L.},
month = may,
year = {1995},
pages = {54-58}
}
@techreport{westfall_pangea:_2016,
type = {Working Paper},
title = {{{PANGEA}}: {{Power ANalysis}} for {{GEneral Anova}} Designs},
abstract = {In this paper I present PANGEA (Power ANalysis for GEneral Anova designs; http://jakewestfall.org/pangea/), a user-friendly, open source, web-based power application that can be used for conducting power analyses in general ANOVA designs. A general ANOVA design is any experimental design that can be described by some variety of ANOVA model. Surprisingly, a power analysis program for general ANOVA designs did not exist until now. PANGEA can estimate power for designs that consist of any number of factors, each with any number of levels; any factor can be considered fixed or random; and any possible pattern of nesting or crossing of the factors is allowed. I demonstrate how PANGEA can be used to estimate power for anything from simple between- and within-subjects designs, to more complicated designs with multiple random factors (e.g., multilevel designs and crossed-random-effects designs). I document the statistical theory underlying PANGEA and describe some experimental features to be added in the near future.},
author = {Westfall, Jacob},
month = oct,
year = {2016},
file = {D:\\core\\reading\\unsort\\pangea.pdf}
}
@article{baayen_mixed-effects_2008,
title = {Mixed-Effects Modeling with Crossed Random Effects for Subjects and Items},
volume = {59},
issn = {0749-596X},
doi = {10.1016/j.jml.2007.12.005},
language = {en},
number = {4},
journal = {Journal of Memory and Language},
author = {Baayen, R. Harald and Davidson, D.J. and Bates, D.M.},
year = {2008},
pages = {390-412},
file = {D:\\core\\reading\\jml\\BDB2008.pdf}
}
@article{batesParsimoniousMixedModels2015,
archivePrefix = {arXiv},
eprinttype = {arxiv},
eprint = {1506.04967},
primaryClass = {stat},
title = {Parsimonious {{Mixed Models}}},
abstract = {The analysis of experimental data with mixed-effects models requires decisions about the specification of the appropriate random-effects structure. Recently, Barr et al. (2013) recommended fitting 'maximal' models with all possible random effect components included. Estimation of maximal models, however, may not converge. We show that failure to converge typically is not due to a suboptimal estimation algorithm, but is a consequence of attempting to fit a model that is too complex to be properly supported by the data, irrespective of whether estimation is based on maximum likelihood or on Bayesian hierarchical modeling with uninformative or weakly informative priors. Importantly, even under convergence, overparameterization may lead to uninterpretable models. We provide diagnostic tools for detecting overparameterization and guiding model simplification. Finally, we clarify that the simulations on which Barr et al. base their recommendations are atypical for real data. A detailed example is provided of how subject-related attentional fluctuation across trials may further qualify statistical inferences about fixed effects, and of how such nonlinear effects can be accommodated within the mixed-effects modeling framework.},
journal = {arXiv:1506.04967 [stat]},
author = {Bates, Douglas and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald},
year = {2015},
keywords = {Statistics - Methodology},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\VUGZB8YJ\\Bates 等。 - 2015 - Parsimonious Mixed Models.pdf}
}
@article{BrysbaertPoweranalysiseffect2018,
title = {Power Analysis and Effect Size in Mixed Effects Models: {{A}} Tutorial},
volume = {1},
doi = {10.5334/joc.10},
abstract = {In psychology, attempts to replicate published findings are less successful than expected. For properly powered studies replication rate should be around 80\%, whereas the Open Science Collaboration (2015) could replicate less than 40\% of the studies selected from different areas of psychology. Researchers are hindered in estimating the power of their studies, because the designs they use present a sample of stimulus materials to a sample of participants, a situation not covered by most power formulas. To remedy the situation, we review the literature related to the topic and introduce recent software packages, which we apply to the data of two masked priming studies with high power. We checked how we could estimate the power of each study and how much they could be reduced to remain powerful enough. On the basis of this analysis, we recommend that a properly powered reaction time experiment with repeated measures has at least 1,600 word observations per condition (e.g., 40 participants, 40 stimuli). This is considerably more than current practice. We also show that researchers must include the number of observations in meta-analyses because the effect sizes currently reported depend on the number of stimuli presented to the participants. Our analyses can easily be applied to new datasets gathered.},
number = {1},
journal = {Journal of Cognition},
author = {Brysbaert, Marc and Stevens, Micha\"el},
year = {2018},
keywords = {mixed-effects},
pages = {1-20},
file = {D:\\core\\reading\\unsort\\10-84-1-PB.pdf;D:\\core\\reading\\unsort\\Power_analysis_and_effect_size_in_mixed_effects_models_A_tutorial.pdf}
}
@article{novackLearningGestureHow2015,
title = {Learning from {{Gesture}}: {{How Our Hands Change Our Minds}}},
volume = {27},
issn = {1040-726X, 1573-336X},
shorttitle = {Learning from {{Gesture}}},
doi = {10.1007/s10648-015-9325-3},
language = {en},
number = {3},
journal = {Educational Psychology Review},
author = {Novack, Miriam A. and {Goldin-Meadow}, Susan},
month = sep,
year = {2015},
pages = {405-412}
}
@article{NovackGesturerepresentationalaction2017,
title = {Gesture as Representational Action: {{A}} Paper about Function},
volume = {24},
issn = {1069-9384, 1531-5320},
shorttitle = {Gesture as Representational Action},
doi = {10.3758/s13423-016-1145-z},
language = {en},
number = {3},
journal = {Psychonomic Bulletin \& Review},
author = {Novack, Miriam A. and {Goldin-Meadow}, Susan},
month = jun,
year = {2017},
pages = {652-665},
file = {D:\\core\\reading\\Psychonomic B&R\\Novack 與 Goldin-Meadow - 2017 - Gesture as representational action A paper about .pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\56Z9QFAG\\s13423-016-1145-z.html}
}
@article{chenDoesObjectSize2018,
title = {Does {{Object Size Matter}} with {{Regard}} to the {{Mental Simulation}} of {{Object Orientation}}?},
doi = {10.17605/OSF.IO/A3VWC},
abstract = {People have been argued to mentally represent the implied orientation of objects (Stanfield \& Zwaan, 2001). However, the effect is rather small in a sentence-picture verification task compared to published effects of other visual dimensions, such as shape, size, and color. The present study examines whether object size moderates the orientation effect. Theoretical considerations of how we interact with manipulable versus non-manipulable objects lead us to predict a smaller effect for manipulable objects than for non-manipulable objects. We furthermore predict the generalization of this pattern across languages (Chinese, Dutch, and English). Furthermore, we seek converging evidence in a picture-picture verification task that compares the verification times between pairs of rotated or non-rotated objects. Longer verification times for large objects would indicate that these objects require more cognitive effort for mental simulation than small objects, regardless of the presence of language.},
journal = {IPA by Experimental Psychology},
author = {Chen, Sau-Chin and {de Koning}, Bj\"orn and Zwaan, Rolf A.},
month = mar,
year = {2018},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\4Q3NVFY2\\a3vwc.html}
}
@article{ConnellFunctionalRoleModalitySpecific2012,
title = {A {{Functional Role}} for {{Modality}}-{{Specific Perceptual Systems}} in {{Conceptual Representations}}},
volume = {7},
issn = {1932-6203},
doi = {10.1371/journal.pone.0033321},
abstract = {Theories of embodied cognition suggest that conceptual processing relies on the same neural resources that are utilized for perception and action. Evidence for these perceptual simulations comes from neuroimaging and behavioural research, such as demonstrations of somatotopic motor cortex activations following the presentation of action-related words, or facilitation of grasp responses following presentation of object names. However, the interpretation of such effects has been called into question by suggestions that neural activation in modality-specific sensorimotor regions may be epiphenomenal, and merely the result of spreading activations from ``disembodied'', abstracted, symbolic representations. Here, we present two studies that focus on the perceptual modalities of touch and proprioception. We show that in a timed object-comparison task, concurrent tactile or proprioceptive stimulation to the hands facilitates conceptual processing relative to control stimulation. This facilitation occurs only for small, manipulable objects, where tactile and proprioceptive information form part of the multimodal perceptual experience of interacting with such objects, but facilitation is not observed for large, nonmanipulable objects where such perceptual information is uninformative. Importantly, these facilitation effects are independent of motor and action planning, and indicate that modality-specific perceptual information plays a functionally constitutive role in our mental representations of objects, which supports embodied assumptions that concepts are grounded in the same neural systems that govern perception and action.},
language = {en},
number = {3},
journal = {PLOS ONE},
author = {Connell, Louise and Lynott, Dermot and Dreyer, Felix},
month = mar,
year = {2012},
keywords = {Proprioception,Vision,Perception,Cognition,Arms,Hands,Sensory perception,Vibration},
pages = {e33321},
file = {D:\\core\\reading\\PLOS\\Connell 等。 - 2012 - A Functional Role for Modality-Specific Perceptual.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\NCPKT7FT\\article.html}
}
@article{CheungRegisteredReplicationReport2016,
title = {Registered {{Replication Report}}: {{Study}} 1 {{From Finkel}}, {{Rusbult}}, {{Kumashiro}}, \& {{Hannon}} (2002)},
volume = {11},
issn = {1745-6916, 1745-6924},
shorttitle = {Registered {{Replication Report}}},
doi = {10.1177/1745691616664694},
language = {en},
number = {5},
journal = {Perspectives on Psychological Science},
author = {Cheung, I. and Campbell, L. and LeBel, E. P. and Ackerman, R. A. and Aykuto{\u g}lu, B. and Bahn\'ik, {\v S}. and Bowen, J. D. and Bredow, C. A. and Bromberg, C. and Caprariello, P. A. and Carcedo, R. J. and Carson, K. J. and Cobb, R. J. and Collins, N. L. and Corretti, C. A. and DiDonato, T. E. and Ellithorpe, C. and {Fern\'andez-Rouco}, N. and Fuglestad, P. T. and Goldberg, R. M. and Golom, F. D. and {G\"undo{\u g}du-Akt\"urk}, E. and Hoplock, L. B. and Houdek, P. and Kane, H. S. and Kim, J. S. and Kraus, S. and Leone, C. T. and Li, N. P. and Logan, J. M. and Millman, R. D. and Morry, M. M. and Pink, J. C. and Ritchey, T. and Root Luna, L. M. and Sinclair, H. C. and Stinson, D. A. and Sucharyna, T. A. and Tidwell, N. D. and Uysal, A. and Vranka, M. and Winczewski, L. A. and Yong, J. C.},
month = sep,
year = {2016},
pages = {750-764},
}
@article{HaggerMultilabPreregisteredReplication2016a,
title = {A {{Multilab Preregistered Replication}} of the {{Ego}}-{{Depletion Effect}}},
volume = {11},
issn = {1745-6916, 1745-6924},
doi = {10.1177/1745691616652873},
language = {en},
number = {4},
journal = {Perspectives on Psychological Science},
author = {Hagger, M. S. and Chatzisarantis, N. L. D. and Alberts, H. and Anggono, C. O. and Batailler, C. and Birt, A. R. and Brand, R. and Brandt, M. J. and Brewer, G. and Bruyneel, S. and Calvillo, D. P. and Campbell, W. K. and Cannon, P. R. and Carlucci, M. and Carruth, N. P. and Cheung, T. and Crowell, A. and De Ridder, D. T. D. and Dewitte, S. and Elson, M. and Evans, J. R. and Fay, B. A. and Fennis, B. M. and Finley, A. and Francis, Z. and Heise, E. and Hoemann, H. and Inzlicht, M. and Koole, S. L. and Koppel, L. and Kroese, F. and Lange, F. and Lau, K. and Lynch, B. P. and Martijn, C. and Merckelbach, H. and Mills, N. V. and Michirev, A. and Miyake, A. and Mosser, A. E. and Muise, M. and Muller, D. and Muzi, M. and Nalis, D. and Nurwanti, R. and Otgaar, H. and Philipp, M. C. and Primoceri, P. and Rentzsch, K. and Ringos, L. and Schlinkert, C. and Schmeichel, B. J. and Schoch, S. F. and Schrama, M. and Sch\"utz, A. and Stamos, A. and Tingh\"og, G. and Ullrich, J. and {vanDellen}, M. and Wimbarti, S. and Wolff, W. and Yusainy, C. and Zerhouni, O. and Zwienenberg, M.},
month = jul,
year = {2016},
keywords = {meta-analysis,strength model,energy model,resource depletion,self-regulation},
pages = {546-573},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\AWP2MFRA\\Hagger 與 Chatzisarantis - 2016 - A Multilab Preregistered Replication of the Ego-De.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\DNEQMHWC\\546.html}
}
@misc{arslanRubenarslanFormrOrg2018,
title = {{{rubenarslan/formr.org}}: {{New Ways To Expire Surveys}}},
shorttitle = {{{rubenarslan/formr.org}}},
abstract = {New options give you more control over when survey access expires.
Fixed server-side validation: information entered into forms is no longer lost.},
publisher = {{Zenodo}},
author = {Arslan, Ruben C. and Tata, Cyril S.},
year = {2018},
doi = {10.5281/zenodo.1161181}
}
@misc{TranslationProcess,
title = {Translation {{Process}}},
journal = {Psychological Science Accelerator},
howpublished = {https://psysciacc.org/translation-process/}
}
@article{alibaliEmbodimentMathematicsTeaching2012,
title = {Embodiment in {{Mathematics Teaching}} and {{Learning}}: {{Evidence From Learners}}' and {{Teachers}}' {{Gestures}}},
volume = {21},
shorttitle = {Embodiment in {{Mathematics Teaching}} and {{Learning}}},
doi = {10.1080/10508406.2011.611446},
journal = {Journal of the Learning Sciences},
author = {Alibali, Martha W. and Nathan, Mitchell J.},
year = {2012},
pages = {247-286}
}
@article{borreggineActionSentenceCompatibilityEffect2006,
title = {The {{Action}}-{{Sentence Compatibility Effect}}: {{It}}'s {{All}} in the {{Timing}}},
volume = {30},
issn = {0364-0213},
shorttitle = {The {{Action}}-{{Sentence Compatibility Effect}}},
doi = {10.1207/s15516709cog0000_91},
language = {en},
number = {6},
journal = {Cognitive Science},
author = {Borreggine, Kristin L. and Kaschak, Michael P.},
year = {2006},
pages = {1097-1112}
}
@article{jonesStageRRSubmission2018,
title = {Social Perception of Faces around the World: {{How}} Well Does the Valence-Dominance Model Generalize across World Regions? ({{Registered Report Stage}} 1)},
shorttitle = {Social Perception of Faces around the World},
doi = {10.17605/OSF.IO/N26DY},
abstract = {Over the last ten years, Oosterhof and Todorov's (2008) valence-dominance model of social judgments of faces has emerged as the most prominent account of how we evaluate faces on social dimensions. In this model, two dimensions (valence and dominance) underpin social judgments of faces. How well this model generalizes across world regions is a critical, yet unanswered, question. We will address this question by replicating Oosterhof and Todorov's (2008) methodology across all world regions (Africa, Asia, Central America and Mexico, Eastern Europe, Middle East, USA and Canada, Australia and New Zealand, Scandinavia, South America, UK, Western Europe, total N $\geq$ 9525) and using a diverse set of face stimuli. If we uncover systematic regional differences in social judgments, this will fundamentally change how social perception research is done and interpreted. If we find consistency across regions, this will ground future theory in an appropriately powered empirical test of an underlying assumption.},
journal = {PsyArXiv},
author = {Jones, Benedict C. and DeBruine, Lisa and Flake, Jessica Kay and Aczel, Balazs and Adamkovic, Matus and Alaei, Ravin and Alper, Sinan and Andreychik, Michael and Ansari, Daniel and Arnal, Jack and Babin{\v c}\'ak, Peter and Ban\'ik, Gabriel and Barzykowski, Krystian and Baskin, Ernest and Batres, Carlota and Blake, Khandis and {Borras-Guevara}, Martha Lucia and Brandt, Mark and Burin, D. I. and Cai, Sun Jun and Calvillo, Dustin and Chandel, Priyanka and Chatard, Armand and Chen, Sau-Chin and Chevallier, Coralie and Chopik, William J. and Christopherson, Cody D. and Coetzee, Vinet and Coles, Nicholas and Colloff, Melissa and Cook, Corey L. and Crawford, Matt and Danvers, Alexander and Dixson, Barnaby and Dranseika, Vilius and Dunham, Yarrow and Evans, Thomas Rhys and Fernandez, Ana Maria and Flowe, Heather D. and Forscher, Patrick S. and Gardiner, Gwendolyn and {Gilboa-Schechtman}, Eva and Gilead, Michael and Gill, Tripat and {Gonz\'alez-Santoyo}, Isaac and Hahn, Amanda C. and Hehman, Eric and Hu, Chuan-Peng and IJzerman, Hans and Inzlicht, Michael and Irrazabal, Natalia and Jaeger, Bastian and Jang, Chaning and Janssen, Steve M. J. and Jiang, Zhongqing and Ka{\v c}m\'ar, Pavol and Kaminski, Gwenael and Kapucu, Aycan and Koehn, Monica A. and Kovic, Vanja and Kujur, Pratibha and Kung, Chun-Chia and Lee, Ai-Suan and Legate, Nicole and Leong\'omez, Juan David and Levitan, Carmel and Lin, Hause and Lins, Samuel and Liu, Qinglan and Liuzza, Marco Tullio and Lutz, Johannes and Manley, Harry and Marshall, Tara and McCarthy, Randy J. and Michalak, Nicholas and Miller, Jeremy K. and Monajem, Arash and {Mu\~noz-reyes}, J. A. and Musser, Erica D. and Neyroud, Lison and Nielsen, Tonje Kvande and Olsen, Jerome and \"Ozdo{\u g}ru, Asil Ali and Pande, Babita and Parganiha, Arti and Parveen, Noorshama and Pfuhl, Gerit and Philipp, Michael Carl and Pinto, Isabel R. and Polo, Pablo and Pradhan, Sraddha and Protzko, John and Qi, Yue and Ren, Dongning and Ropovik, Ivan and Rule, Nicholas and S\'anchez, Oscar R. and Saribay, Selahattin Adil and Saunders, Blair and Schei, Vidar and Schmidt, Kathleen and Seehuus, Martin and Sharifian, MohammadHasan and Shiramizu, Victor Kenji Medeiros and Simchon, Almog and Singh, Margaret Messiah and Sirota, Miroslav and Sloane, Guyan and Solas, Sara \'Alvarez and de Lima, Tiago Jess\'e Souza and Stephen, Ian and Stieger, Stefan and Storage, Daniel and Sverdrup, Therese E. and Szecsi, Peter and Tamnes, Christian Krog and Tan, Chrystalle B. Y. and Thirkettle, Martin and Tiantian, Dong and Turiegano, Enrique and Uittenhove, Kim and Urry, Heather L. and Valderrama, Eugenio and Valentova, Jaroslava Varella and der Linden, Nicolas Van and Vanpaemel, Wolf and Varella, M. a. C. and {V\'asquez-Am\'ezquita}, Milena and Vaughn, Leigh Ann and Vergauwe, Evie and Vianello, Michelangelo and Wei, Tan Kok and White, David and Wilson, John Paul and Wlodarczyk, Anna and Wu, Qi and Yan, Wen-Jing and Yang, Xin and Zakharov, Ilia and Zickfeld, Janis Heinrich and Chartier, Christopher R.},
month = may,
year = {2018},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\YUWR6D4X\\Jones 等。 - 2018 - Stage 1 RR Submission PreprintSocial perception of.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\7GRZLHDM\\n26dy.html}
}
@article{kosterMentalSimulationObject2018,
title = {Mental Simulation of Object Orientation and Size: {{A}} Conceptual Replication with Second Language Learners},
volume = {2},
issn = {2399-9101},
shorttitle = {Mental Simulation of Object Orientation and Size},
doi = {10.22599/jesla.39},
abstract = {Previous research suggests that native (L1) speakers employ ``mental simulations'' for language comprehension. Empirical work shows that intrinsic object properties (shape, size and color) are indeed simulated, but the evidence for extrinsic properties (orientation) is less convincing. There is little work on simulation in second language (L2) learners, but since they have similar perceptual experiences as L1 speakers there is good reason to think that L2 learners too use simulation to comprehend L2 sentences. This paper aims to conceptually replicate previous simulation studies into object size and orientation with L2 learners (N = 223) and two L1 speaker control groups (N = 64). An important difference with previous work is that we use language-specific forms indicating size (Spanish augmentative suffixes) and orientation (German placement verbs). We expected that language-specific forms would cause simulation for both the intrinsic and extrinsic property under investigation. We employed a sentence-picture verification task and analyzed Yes/No responses and reaction times (RTs). RT results on mis/match trials reveal no orientation effect, but a size match effect. Findings support previous research with null results for orientation and add support for size simulation. We suggest that future studies examine whether L2 learners make simulations for both implied and explicit sentences, whether they simulate with or without prior language instruction and whether they also simulate shape and color.},
language = {eng},
number = {1},
journal = {Journal of the European Second Language Association},
author = {Koster, Dietha and Cadierno, Teresa and Chiarandini, Marco},
month = aug,
year = {2018},
file = {D:\\core\\reading\\unsort\\Koster 等。 - 2018 - Mental simulation of object orientation and size .pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\WPLWUGGF\\jesla.html}
}
@misc{chenObjectOrientationCross2018,
title = {Object {{Orientation}} Cross Languages},
abstract = {This project is prepared for the Psychological Science Accelerator. Hosted on the Open Science Framework.},
language = {en},
journal = {OSF},
howpublished = {https://osf.io/e428p/},
author = {Chen, Sau-Chin and Chartier, Christopher R. and Szabelska, Anna},
year = {2018},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\M9IMPKVM\\e428p.html}
}
@article{zwaanRevisitingMentalSimulation2016,
title = {Revisiting {{Mental Simulation}} in {{Language Comprehension}}: {{Six Replication Attempts}}},
shorttitle = {Revisiting {{Mental Simulation}} in {{Language Comprehension}}},
abstract = {Raw and aggregated data in Excel of Experiments 1a-3b, as reported in the paper.
Hosted on the Open Science Framework},
language = {en},
journal = {OSF},
author = {Zwaan, Rolf A. and Pecher, Diane},
month = aug,
year = {2016},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\VZDT3QIH\\2dtrh.html}
}
@misc{chenObjectOrientationCross2018a,
title = {Object {{Orientation}} Cross Languages: {{Pilot}}},
shorttitle = {Object {{Orientation}} Cross Languages},
abstract = {This project is a duplicate of the project Object Orientation Cross Languages. Hosted on the Open Science Framework.},
language = {en},
journal = {OSF},
howpublished = {https://osf.io/ftk7m/},
author = {Chen, Sau-Chin and Chartier, Christopher R. and Szabelska, Anna and Aczel, Balazs and Werner, Kaitlyn M. and Schmidt, Kathleen and Malavanti, Karenna and Musser, Erica D. and Attila, Sz{\H u}ts and Martinez, Jessica},
year = {2018},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\9IJI6JYH\\ftk7m.html}
}
@article{moshontzPsychologicalScienceAccelerator2018,
title = {The {{Psychological Science Accelerator}}: {{Advancing Psychology Through}} a {{Distributed Collaborative Network}}},
shorttitle = {The {{Psychological Science Accelerator}}},
doi = {10.1177/2515245918797607},
abstract = {Concerns about the veracity of psychological research have been growing. Many findings in psychological science are based on studies with insufficient statistical power and nonrepresentative samples, or may otherwise be limited to specific, ungeneralizable settings or populations. Crowdsourced research, a type of large-scale collaboration in which one or more research projects are conducted across multiple lab sites, offers a pragmatic solution to these and other current methodological challenges. The Psychological Science Accelerator (PSA) is a distributed network of laboratories designed to enable and support crowdsourced research projects. These projects can focus on novel research questions or replicate prior research in large, diverse samples. The PSA's mission is to accelerate the accumulation of reliable and generalizable evidence in psychological science. Here, we describe the background, structure, principles, procedures, benefits, and challenges of the PSA. In contrast to other crowdsourced research networks, the PSA is ongoing (as opposed to time limited), efficient (in that structures and principles are reused for different projects), decentralized, diverse (in both subjects and researchers), and inclusive (of proposals, contributions, and other relevant input from anyone inside or outside the network). The PSA and other approaches to crowdsourced psychological science will advance understanding of mental processes and behaviors by enabling rigorous research and systematic examination of its generalizability.},
language = {en},
journal = {Advances in Methods and Practices in Psychological Science},
author = {Moshontz, Hannah and Campbell, Lorne and Ebersole, Charles R. and IJzerman, Hans and Urry, Heather L. and Forscher, Patrick S. and Grahe, Jon E. and McCarthy, Randy J. and Musser, Erica D. and Antfolk, Jan and Castille, Christopher M. and Evans, Thomas Rhys and Fiedler, Susann and Flake, Jessica Kay and Forero, Diego A. and Janssen, Steve M. J. and Keene, Justin Robert and Protzko, John and Aczel, Balazs and Solas, Sara \'Alvarez and Ansari, Daniel and Awlia, Dana and Baskin, Ernest and Batres, Carlota and {Borras-Guevara}, Martha Lucia and Brick, Cameron and Chandel, Priyanka and Chatard, Armand and Chopik, William J. and Clarance, David and Coles, Nicholas A. and Corker, Katherine S. and Dixson, Barnaby James Wyld and Dranseika, Vilius and Dunham, Yarrow and Fox, Nicholas W. and Gardiner, Gwendolyn and Garrison, S. Mason and Gill, Tripat and Hahn, Amanda C. and Jaeger, Bastian and Ka{\v c}m\'ar, Pavol and Kaminski, Gwena\"el and Kanske, Philipp and Kekecs, Zoltan and Kline, Melissa and Koehn, Monica A. and Kujur, Pratibha and Levitan, Carmel A. and Miller, Jeremy K. and Okan, Ceylan and Olsen, Jerome and {Oviedo-Trespalacios}, Oscar and \"Ozdo{\u g}ru, Asil Ali and Pande, Babita and Parganiha, Arti and Parveen, Noorshama and Pfuhl, Gerit and Pradhan, Sraddha and Ropovik, Ivan and Rule, Nicholas O. and Saunders, Blair and Schei, Vidar and Schmidt, Kathleen and Singh, Margaret Messiah and Sirota, Miroslav and Steltenpohl, Crystal N. and Stieger, Stefan and Storage, Daniel and Sullivan, Gavin Brent and Szabelska, Anna and Tamnes, Christian K. and Vadillo, Miguel A. and Valentova, Jaroslava V. and Vanpaemel, Wolf and Varella, Marco A. C. and Vergauwe, Evie and Verschoor, Mark and Vianello, Michelangelo and Voracek, Martin and Williams, Glenn P. and Wilson, John Paul and Zickfeld, Janis H. and Arnal, Jack D. and Aydin, Burak and Chen, Sau-Chin and DeBruine, Lisa M. and Fernandez, Ana Maria and Horstmann, Kai T. and Isager, Peder M. and Jones, Benedict and Kapucu, Aycan and Lin, Hause and Mensink, Michael C. and Navarrete, Gorka and Silan, Miguel A. and Chartier, Christopher R.},
month = oct,
year = {2018},
file = {D:\\core\\reading\\preprints\\Moshontz 等。 - 2018 - The Psychological Science Accelerator Advancing P.pdf;D:\\core\\reading\\preprints\\PSA 4 2 2018.pdf;D:\\core\\reading\\unsort\\0104066.pdf;D:\\core\\Version_Controls\\zotero_data\\storage\\4WDKBD9C\\full.html;D:\\core\\Version_Controls\\zotero_data\\storage\\6LXEVRW7\\785qu.html}
}
@article{mahonCriticalLookEmbodied2008,
title = {A Critical Look at the Embodied Cognition Hypothesis and a New Proposal for Grounding Conceptual Content},
volume = {102},
issn = {0928-4257},
doi = {10.1016/j.jphysparis.2008.03.004},
abstract = {Many studies have demonstrated that the sensory and motor systems are activated during conceptual processing. Such results have been interpreted as indicating that concepts, and important aspects of cognition more broadly, are embodied. That conclusion does not follow from the empirical evidence. The reason why is that the empirical evidence can equally be accommodated by a 'disembodied' view of conceptual representation that makes explicit assumptions about spreading activation between the conceptual and sensory and motor systems. At the same time, the strong form of the embodied cognition hypothesis is at variance with currently available neuropsychological evidence. We suggest a middle ground between the embodied and disembodied cognition hypotheses--grounding by interaction. This hypothesis combines the view that concepts are, at some level, 'abstract' and 'symbolic', with the idea that sensory and motor information may 'instantiate' online conceptual processing.},
language = {eng},
number = {1-3},
journal = {Journal of Physiology-Paris},
author = {Mahon, Bradford Z. and Caramazza, Alfonso},
year = {2008},
keywords = {Humans,Concept Formation,Cognition,Neuropsychological Tests,Psychological Theory},
pages = {59-70},
file = {D:\\core\\reading\\unsort\\Mahon 與 Caramazza - 2008 - A critical look at the embodied cognition hypothes.pdf},
pmid = {18448316}
}
@unpublished{phillsGenderedSocialCategoryinpreparation,
title = {Gendered {{Social Category Representations}}},
author = {Phills, Curtis and Kekecs, Zoltan},
year = {in preparation}
}
@article{beilockSportsExperienceChanges2008,
title = {Sports Experience Changes the Neural Processing of Action Language},
volume = {105},
issn = {0027-8424, 1091-6490},
doi = {10.1073/pnas.0803424105},
language = {en},
number = {36},
journal = {Proceedings of the National Academy of Sciences},
author = {Beilock, S. L. and Lyons, I. M. and {Mattarella-Micke}, A. and Nusbaum, H. C. and Small, S. L.},
year = {2008},
pages = {13269-13273},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\C63Z7X27\\Beilock 等。 - 2008 - Sports experience changes the neural processing of.pdf}
}
@article{hsuColorContextCognitive2011,
title = {Color, {{Context}}, and {{Cognitive Style}}: {{Variations}} in {{Color Knowledge Retrieval}} as a {{Function}} of {{Task}} and {{Subject Variables}}},
volume = {23},
issn = {0898-929X, 1530-8898},
shorttitle = {Color, {{Context}}, and {{Cognitive Style}}},
doi = {10.1162/jocn.2011.21619},
language = {en},
number = {9},
journal = {Journal of Cognitive Neuroscience},
author = {Hsu, Nina S. and Kraemer, David J. M. and Oliver, Robyn T. and Schlichting, Margaret L. and {Thompson-Schill}, Sharon L.},
month = sep,
year = {2011},
pages = {2544-2557}
}
@article{vukovicIndividualDifferencesSpatial2015,
title = {Individual Differences in Spatial Cognition Influence Mental Simulation of Language},
volume = {142},
issn = {0010-0277},
doi = {10.1016/j.cognition.2015.05.017},
language = {en},
journal = {Cognition},
author = {Vukovic, Nikola and Williams, John N.},
month = sep,
year = {2015},
pages = {110-122}
}
@article{zwaanReadersConstructSpatial1993,
title = {Do Readers Construct Spatial Representations in Naturalistic Story Comprehension?},
volume = {16},
issn = {0163-853X, 1532-6950},
doi = {10.1080/01638539309544832},
language = {en},
number = {1-2},
journal = {Discourse Processes},
author = {Zwaan, Rolf A. and {van Oostendorp}, Herre},
month = jan,
year = {1993},
pages = {125-143}
}
@article{chuSpontaneousGesturesMental2008,
title = {Spontaneous Gestures during Mental Rotation Tasks: {{Insights}} into the Microdevelopment of the Motor Strategy.},
volume = {137},
issn = {1939-2222, 0096-3445},
shorttitle = {Spontaneous Gestures during Mental Rotation Tasks},
doi = {10.1037/a0013157},
language = {en},
number = {4},
journal = {Journal of Experimental Psychology: General},
author = {Chu, Mingyuan and Kita, Sotaro},
year = {2008},
pages = {706-723},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\7GZNJ8R5\\Chu 與 Kita - 2008 - Spontaneous gestures during mental rotation tasks.pdf}
}
@article{wragaPassiveTactileFeedback2008,
title = {Passive Tactile Feedback Facilitates Mental Rotation of Handheld Objects},
volume = {36},
issn = {0090-502X, 1532-5946},
doi = {10.3758/MC.36.2.271},
language = {en},
number = {2},
journal = {Memory \& Cognition},
author = {Wraga, Maryjane and Swaby, Monique and Flynn, Catherine M.},
month = mar,
year = {2008},
pages = {271-281},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\PXHGGJDF\\Wraga 等。 - 2008 - Passive tactile feedback facilitates mental rotati.pdf}
}
@article{pouwMoreEmbeddedExtended2014,
title = {Toward a More Embedded/Extended Perspective on the Cognitive Function of Gestures},
volume = {5},
issn = {1664-1078},
doi = {10.3389/fpsyg.2014.00359},
journal = {Frontiers in Psychology},
author = {Pouw, Wim T. J. L. and {de Nooijer}, Jacqueline A. and {van Gog}, Tamara and Zwaan, Rolf A. and Paas, Fred},
month = apr,
year = {2014},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\7I4EWAH7\\Pouw 等。 - 2014 - Toward a more embeddedextended perspective on the.pdf}
}
@article{lebelUnifiedFrameworkQuantify2018,
title = {A {{Unified Framework}} to {{Quantify}} the {{Credibility}} of {{Scientific Findings}}},
volume = {1},
issn = {2515-2459, 2515-2467},
doi = {10.1177/2515245918787489},
abstract = {Societies invest in scientific studies to better understand the world and attempt to harness such improved understanding to address pressing societal problems. Published research, however, can be useful for theory or application only if it is credible. In science, a credible finding is one that has repeatedly survived risky falsification attempts. However, state-of-the-art meta-analytic approaches cannot determine the credibility of an effect because they do not account for the extent to which each included study has survived such attempted falsification. To overcome this problem, we outline a unified framework for estimating the credibility of published research by examining four fundamental falsifiability-related dimensions: (a) transparency of the methods and data, (b) reproducibility of the results when the same data-processing and analytic decisions are reapplied, (c) robustness of the results to different data-processing and analytic decisions, and (d) replicability of the effect. This framework includes a standardized workflow in which the degree to which a finding has survived scrutiny is quantified along these four facets of credibility. The framework is demonstrated by applying it to published replications in the psychology literature. Finally, we outline a Web implementation of the framework and conclude by encouraging the community of researchers to contribute to the development and crowdsourcing of this platform.},
language = {en},
number = {3},
journal = {Advances in Methods and Practices in Psychological Science},
author = {LeBel, Etienne P. and McCarthy, Randy J. and Earp, Brian D. and Elson, Malte and Vanpaemel, Wolf},
year = {2018},
pages = {389-402},
file = {D:\\core\\reading\\unsort\\Curate-Science-General-Framework[4.4.0]-pre-print-version.pdf;D:\\core\\reading\\unsort\\lebeletal(2018,ampss)a-unified-framework-to-quantify-the-credibility-of-scientific-findings.pdf}
}
@article{hallAcceleratedCREPRRR2018,
title = {Accelerated {{CREP}} - {{RRR}}: {{Turri}}, {{Buckwalter}}, \& {{Blouw}} (2015)},
shorttitle = {Accelerated {{CREP}} - {{RRR}}},
doi = {10.31234/osf.io/zeux9},
abstract = {According to the Justified True Belief (JTB) account of knowledge, a person's ability to know something is defined by having a belief that is both justified and true (i.e., knowledge is justified true belief). However, this account fails to consider the role of luck. In 1963, Gettier argued that JTB is insufficient because it does not account for certain situations, called Gettier cases, wherein a person is justified for believing something true but only because of luck. It is unclear whether lay people's intuitions about knowledge lead them to agree with Gettier, such that lay people believe that individuals in these cases lack knowledge (referred to as Gettier intuitions). We attempt to provide a robust estimate of the Gettier intuition effect size by replicating Turri and colleagues' (2015) Experiment 1. The Collaborative Replications and Education Project (CREP) selected this study for replication based on its undergraduate appeal, feasibility, and pedagogical value. However, in light of some inconsistent results, suboptimal designs, and inconsistent evidence for cultural variation (e.g., Machery et al., 2015; Nagel et al., 2013; Seyedsayamdost et al., 2015; Starmans \& Friedman, 2012; Weinberg et al., 2001), the improved methodology of Turri et al. (2015) makes it an important study to replicate cross-culturally. Therefore, we propose a multisite collaborative preregistered replication of Turri and colleagues' (2015) Experiment 1 (35 labs from 14 countries across 4 continents signed up at time of submission; expected minimum N = 1,500). Results of this study are expected to provide a clearer picture of the Gettier intuition effect size, lay people's theory and practice of knowledge, and potentially cross-cultural similarities and differences.},
author = {Hall, Braeden and Wagge, Jordan and Chartier, Christopher R. and Pfuhl, Gerit and Stieger, Stefan and Vergauwe, Evie and Musser, Erica D. and Alvarez, Leslie Cramblet and Solas, Sara Alvarez and {Schulte-Mecklenbeck}, Michael and Field, Andy and Henderson, Elena Kelsey and IJzerman, Hans and Fallon, Marianne and Ozdogru, Asil and Andreychik, Michael and Chen, Sau-Chin and Sampaio, Waldir M. and Tkalich, Anastasiia and Nusbaum, Amy and Evans, Thomas Rhys and Voracek, Martin and Tran, Ulrich and Hao, Sean Lee Teck and Redman, David and \"Ozben, S\"umeyra and Mathis, Nicole and Pernerstorfer, Felizitas and Hartanto, Andree and M\ae{}kel\ae, Martin Jensen and Cova, Florian and Arnal, Jack and Werner, Kaitlyn M. and Buchanan, Erin Michelle and Johnson, Hannah L. and Dranseika, Vilius and Gwena\"el, Kaminski and Schwegmann, Sam and Schild, Christoph and Yildirim, Evin and Endres, Yannick Michael and Bange, Paula and Aschenbroich, Kira and Li, Manyu and Vu, Annie Hanh and Aczel, Balazs and {Chia-Shien}, Lin and Chiu, Bryandon and Ying, Lim Ke and Kirk, Neil W. and Legate, Nicole and Lowry, Miles B. and Osborne, Lindsey and Xuan, Lee Ru and Szecsi, Peter and Weisberg, Yanna and Ku, Yu-Hsuan and Lamb, Colleen and Krafnick, Anthony James and Warne, Russell T. and Levitan, Carmel and Storage, Daniel and Barzykowski, Krystian and Meijer, Ewout and Fincher, Corey and Batres, Carlota and {Calin-Jageman}, Robert and Brandt, Mark and Grahe, Jon},
month = oct,
year = {2018},
file = {D:\\core\\reading\\preprints\\Hall 等。 - 2018 - Accelerated CREP - RRR Turri, Buckwalter, & Blouw.pdf}
}
@article{chenInvestigatingObjectOrientation2018,
title = {Investigating {{Object Orientation Effects Across}} 14 {{Languages}}},
doi = {10.31234/osf.io/t2pjv},
abstract = {Mental simulation theories of language comprehension propose that people automatically create mental representations of real objects. Evidence from sentence-picture verification tasks has shown that people mentally represent various visual properties such as shape, color, and size. However, the evidence for mental simulations of object orientation is limited. We report a study that investigates the match advantage of object orientation across speakers of different languages. This multi-laboratory project aims to achieve two objectives. First, we examine the replicability of the match advantage of object orientation across multiple languages and laboratories. Second, we will use a mental rotation task to measure participants' mental imagery after the sentence-picture verification task. The relationship between the participants' performance of the two tasks will provide a cross-linguistic examination of perceptual simulation processes. With the (broad) evaluation of individual mental imagery ability and potential linguistic moderators, we expect a robust estimation of match advantage of object orientation.},
author = {Chen, Sau-Chin and Szabelska, Anna and Chartier, Christopher R. and Kekecs, Zoltan and Lynott, Dermot and Bernabeu, Pablo and Jones, Benedict C. and DeBruine, Lisa and Levitan, Carmel and Werner, Kaitlyn M. and Wang, Kelly and Milyavskaya, Marina and Musser, Erica D. and {Papadatou-Pastou}, Marietta and Coles, Nicholas and Janssen, Steve and Ozdogru, Asil and Storage, Daniel and Manley, Harry and Brown, Benjamin T. and Barzykowski, Krystian and Evans, Thomas Rhys and Oberzaucher, Elisabeth and Li, Manyu and Vaughn, Leigh Ann and Aczel, Balazs and Attila, Sz{\H u}ts and Batres, Carlota and Chopik, William J. and Peters, Kim Olivia and Olsen, Jerome and Voracek, Martin and Tamnes, Christian Krog and Sirota, Miroslav and Liu, Dawn and Williams, Glenn Patrick and Parganiha, Arti and Chandel, Priyanka and Singh, Margaret Messiah and Tan, Chrystalle B. Y. and Protzko and Arnal, Jack and Stieger, Stefan and Liuzza, Marco Tullio and Ka{\v c}m\'ar, Pavol and Bavolar, Jozef and Ban\'ik, Gabriel and Adamkovic, Matus and Ropovik, Ivan and Babincak, Peter and Seehuus, Martin and Kovic, Vanja and Schmidt, Kathleen},
month = nov,
year = {2018},
file = {D:\\core\\reading\\preprints\\Chen 等。 - 2018 - Investigating Object Orientation Effects Across 14.pdf}
}
@article{hoebenmannaertColorIntegralPart2017,
title = {Is Color an Integral Part of a Rich Mental Simulation?},
volume = {45},
issn = {0090-502X, 1532-5946},
doi = {10.3758/s13421-017-0708-1},
language = {en},
number = {6},
journal = {Memory \& Cognition},
author = {Hoeben Mannaert, Lara N. and Dijkstra, Katinka and Zwaan, Rolf A.},
month = aug,
year = {2017},
pages = {974-982},
file = {D:\\core\\reading\\m&c\\Hoeben Mannaert 等。 - 2017 - Is color an integral part of a rich mental simulat.pdf}
}
@article{wexlerMotorProcessesMental1998,
title = {Motor Processes in Mental Rotation},
volume = {68},
issn = {0010-0277},
abstract = {Much indirect evidence supports the hypothesis that transformations of mental images are at least in part guided by motor processes, even in the case of images of abstract objects rather than of body parts. For example, rotation may be guided by processes that also prime one to see results of a specific motor action. We directly test the hypothesis by means of a dual-task paradigm in which subjects perform the Cooper-Shepard mental rotation task while executing an unseen motor rotation in a given direction and at a previously-learned speed. Four results support the inference that mental rotation relies on motor processes. First, motor rotation that is compatible with mental rotation results in faster times and fewer errors in the imagery task than when the two rotations are incompatible. Second, the angle through which subjects rotate their mental images, and the angle through which they rotate a joystick handle are correlated, but only if the directions of the two rotations are compatible. Third, motor rotation modifies the classical inverted V-shaped mental rotation response time function, favoring the direction of the motor rotation; indeed, in some cases motor rotation even shifts the location of the minimum of this curve in the direction of the motor rotation. Fourth, the preceding effect is sensitive not only to the direction of the motor rotation, but also to the motor speed. A change in the speed of motor rotation can correspondingly slow down or speed up the mental rotation.},
language = {eng},
number = {1},
journal = {Cognition},
author = {Wexler, M. and Kosslyn, S. M. and Berthoz, A.},
month = aug,
year = {1998},
keywords = {Adult,Female,Humans,Male,Reaction Time,Models; Neurological,Mental Processes,Psychomotor Performance,Analysis of Variance,Imagery (Psychotherapy),Motor Skills,Rotation,User-Computer Interface},
pages = {77-94},
pmid = {9775517}
}
@article{windischbergerHumanMotorCortex2003,
title = {Human Motor Cortex Activity during Mental Rotation},
volume = {20},
issn = {1053-8119},
abstract = {The functional role of human premotor and primary motor cortex during mental rotation has been studied using functional MRI at 3 T. Fourteen young, male subjects performed a mental rotation task in which they had to decide whether two visually presented cubes could be identical. Exploratory Fuzzy Cluster Analysis was applied to identify brain regions with stimulus-related time courses. This revealed one dominant cluster which included the parietal cortex, premotor cortex, and dorsolateral prefrontal cortex that showed signal enhancement during the whole stimulus presentation period, reflecting cognitive processing. A second cluster, encompassing the contralateral primary motor cortex, showed activation exclusively after the button press response. This clear separation was possible in 3 subjects only, however. Based on these exploratory results, the hypothesis that primary motor cortex activity was related to button pressing only was tested using a parametric approach via a random-effects group analysis over all 14 subjects in SPM99. The results confirmed that the stimulus response via button pressing causes activation in the primary motor cortex and supplementary motor area while parietal cortex and mesial regions rostral to the supplementary motor area are recruited for the actual mental rotation process.},
language = {eng},
number = {1},
journal = {NeuroImage},
author = {Windischberger, Christian and Lamm, Claus and Bauer, Herbert and Moser, Ewald},
month = sep,
year = {2003},
keywords = {Adult,Humans,Male,Brain Mapping,Imagination,Data Interpretation; Statistical,Rotation,Cluster Analysis,Fuzzy Logic,Image Processing; Computer-Assisted,Magnetic Resonance Imaging,Motor Cortex},
pages = {225-232},
pmid = {14527583}
}
@article{wohlschlagerMentalManualRotation1998,
title = {Mental and Manual Rotation},
volume = {24},
issn = {0096-1523},
abstract = {The relation between mental and manual rotation was investigated in 2 experiments. Experiment 1 compared the response times (RTs) of mental rotation about 4 axes in space with the RTs shown in the same task when participants were allowed to reorient the stimuli by means of rotational hand movements. For the 3 Cartesian axes, RT functions were quantitatively indistinguishable. Experiment 2 investigated interference between mental rotation and 4 kinds of simultaneously executed hand movements that did not reorient the stimuli. Interference was observed only when axes of manual and mental rotation coincided in space. Regardless of the hand used, concordant rotational directions facilitated, whereas discordant directions inhibited, mental rotation. The results suggest that mental object rotation and rotatory object manipulation share a common process that is thought to control the dynamics of both imagined and actually performed object reorientation.},
language = {eng},
number = {2},
journal = {Journal of Experimental Psychology. Human Perception and Performance},
author = {Wohlschl\"ager, A. and Wohlschl\"ager, A.},
month = apr,
year = {1998},
keywords = {Humans,Reaction Time,Pattern Recognition; Visual,Attention,Discrimination Learning,Imagination,Psychophysics,Orientation,Psychomotor Performance,Depth Perception},
pages = {397-412},
pmid = {9606108}
}
@article{akaikeNewLookStatistical1974,
title = {A New Look at the Statistical Model Identification},
volume = {19},
issn = {0018-9286},
doi = {10.1109/TAC.1974.1100705},
language = {en},
number = {6},
journal = {IEEE Transactions on Automatic Control},
author = {Akaike, H.},
month = dec,
year = {1974},
pages = {716-723}
}
@book{burnhamModelSelectionMultimodel2010,
address = {New York, NY},
edition = {2nd ed.},
title = {Model Selection and Multimodel Inference: A Practical Information-Theoretic Approach},
isbn = {978-0-387-22456-5 978-1-4419-2973-0},
shorttitle = {Model Selection and Multimodel Inference},
language = {eng},
publisher = {{Springer}},
author = {Burnham, Kenneth P. and Anderson, David Ray},
year = {2010},
file = {D:\\core\\Version_Controls\\zotero_data\\storage\\DD7T5FIV\\Burnham 與 Anderson - 2010 - Model selection and multimodel inference a practi.pdf},
note = {OCLC: 846443242}
}
@Manual{R-base,
title = {R: A Language and Environment for Statistical Computing},
author = {{R Core Team}},
organization = {R Foundation for Statistical Computing},
address = {Vienna, Austria},
year = {2018},
url = {https://www.R-project.org/},
}
@Manual{R-dplyr,
title = {dplyr: A Grammar of Data Manipulation},
author = {Hadley Wickham and Romain Fran\c{c}ois and Lionel Henry and Kirill M\"uller},
year = {2018},
note = {R package version 0.7.8},
url = {https://CRAN.R-project.org/package=dplyr},
}
@Article{R-Formula,
title = {Extended Model Formulas in {R}: Multiple Parts and Multiple Responses},
author = {Achim Zeileis and Yves Croissant},
journal = {Journal of Statistical Software},
year = {2010},
volume = {34},
number = {1},
pages = {1--13},
doi = {10.18637/jss.v034.i01},
}
@Book{R-ggplot2,
author = {Hadley Wickham},
title = {ggplot2: Elegant Graphics for Data Analysis},
publisher = {Springer-Verlag New York},
year = {2016},
isbn = {978-3-319-24277-4},
url = {http://ggplot2.org},
}
@Manual{R-Hmisc,
title = {Hmisc: Harrell Miscellaneous},
author = {Frank E {Harrell Jr}},
year = {2019},
note = {R package version 4.2-0, with contributions from Charles Dupont and many others},
url = {https://CRAN.R-project.org/package=Hmisc},
}
@Manual{R-kableExtra,
title = {kableExtra: Construct Complex Table with 'kable' and Pipe Syntax},
author = {Hao Zhu},
year = {2019},
note = {R package version 1.0.1},
url = {https://CRAN.R-project.org/package=kableExtra},
}
@Book{R-lattice,
title = {Lattice: Multivariate Data Visualization with R},
author = {Deepayan Sarkar},
publisher = {Springer},
address = {New York},
year = {2008},
isbn = {978-0-387-75968-5},
url = {http://lmdvr.r-forge.r-project.org},
}
@Manual{R-papaja,
author = {Frederik Aust and Marius Barth},
title = {{papaja}: {Create} {APA} manuscripts with {R Markdown}},
year = {2018},
note = {R package version 0.1.0.9842},
url = {https://github.com/crsh/papaja},
}
@Book{R-survival-book,
title = {Modeling Survival Data: Extending the {C}ox Model},
author = {Terry M. Therneau and Patricia M. Grambsch},
year = {2000},
publisher = {Springer},
address = {New York},
isbn = {0-387-98784-3},
}
@Manual{R-tables,
title = {tables: Formula-Driven Table Generation},
author = {Duncan Murdoch},
year = {2018},
note = {R package version 0.8.7},
url = {https://CRAN.R-project.org/package=tables},
}