{"id":3595,"date":"2021-06-11T15:49:00","date_gmt":"2021-06-11T15:49:00","guid":{"rendered":"https:\/\/speechneurolab.ca\/?p=3595"},"modified":"2024-01-09T17:19:08","modified_gmt":"2024-01-09T17:19:08","slug":"leffet-mcgurk","status":"publish","type":"post","link":"https:\/\/speechneurolab.ca\/en\/leffet-mcgurk\/","title":{"rendered":"The McGurk effect"},"content":{"rendered":"\t\t<div data-elementor-type=\"wp-post\" data-elementor-id=\"3595\" class=\"elementor elementor-3595 elementor-1660\">\n\t\t\t\t\t\t<section class=\"elementor-section elementor-top-section elementor-element elementor-element-dc3a497 elementor-section-boxed elementor-section-height-default elementor-section-height-default\" data-id=\"dc3a497\" data-element_type=\"section\">\n\t\t\t\t\t\t<div class=\"elementor-container elementor-column-gap-default\">\n\t\t\t\t\t<div class=\"elementor-column elementor-col-100 elementor-top-column elementor-element elementor-element-d679c85\" data-id=\"d679c85\" data-element_type=\"column\">\n\t\t\t<div class=\"elementor-widget-wrap elementor-element-populated\">\n\t\t\t\t\t\t<div class=\"elementor-element elementor-element-8648a06 elementor-widget elementor-widget-text-editor\" data-id=\"8648a06\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<p class=\"TIMESSS\" style=\"text-align: justify;\"><strong><span lang=\"EN-CA\">You like testing illusions?\u00a0<\/span><span lang=\"EN-CA\">We invite you to experience an illusory phenomenon linked to the perception of speech!<\/span><\/strong><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">To start the experiment, click on the video.<\/span><\/p>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-71bcef5 elementor-widget elementor-widget-video\" data-id=\"71bcef5\" data-element_type=\"widget\" 
data-settings=\"{&quot;video_type&quot;:&quot;hosted&quot;,&quot;controls&quot;:&quot;yes&quot;}\" data-widget_type=\"video.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t<div class=\"e-hosted-video elementor-wrapper elementor-open-inline\">\n\t\t\t\t\t<video class=\"elementor-video\" src=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/07\/effet-mcgurk_va.mp4\" controls=\"\" preload=\"metadata\" controlsList=\"nodownload\"><\/video>\n\t\t\t\t<\/div>\n\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-20ec8c1 elementor-widget elementor-widget-text-editor\" data-id=\"20ec8c1\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">The \u201cMcGurk\u201d phenomenon was first described in 1976 (<\/span><span lang=\"EN-CA\">McGurk &amp; MacDonald, 1976) and reproduced by many research teams throughout the world, with speakers of various languages. Since then, many research teams have attempted to understand how audiovisual integration works: when does it occur? Is it compulsory? Does it change with age? What brain regions are involved?<\/span><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">Many research teams have studied the phenomenon of audiovisual integration with electroencephalography (EEG), a technique that measures the electrical activity of the brain using electrodes placed on the scalp. EEG measures evoked potentials, which are responses of the central nervous system to stimulation (e.g., to a sound heard). The evoked potentials can be observed on a plot called the\u00a0<em>electroencephalogram<\/em>\u00a0(Luck, 2014). Some of these potentials are associated with audiovisual integration, such as the potentials forming the P1-N1-P2 complex (see figure 1). 
This complex consists of positive and negative deflections peaking at approximately 50 ms (P1), 100 ms (N1) and 200 ms (P2) after the onset of a stimulus, like a sound. This series of auditory evoked potentials indicates that sound has reached the auditory cortex and that acoustic-phonetic processing has been initiated, that is, that the processing of sound characteristics such as pitch (\u00b1 high) or duration (\u00b1 long), which distinguish the sounds of speech (e.g. the \/p\/ vs. the \/ch\/ sound), has been initiated. It is possible to characterize the evoked potentials as a function of their latency (time between the appearance of the stimulus and the &#8220;peak&#8221; of the evoked potential, measured in milliseconds) and amplitude (&#8220;height&#8221; of the evoked potential, measured in microvolts or \u00b5V &#8211; a microvolt is one millionth of a volt; this is very low electrical activity!).<\/span><\/p>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-039774e elementor-widget elementor-widget-image\" data-id=\"039774e\" data-element_type=\"widget\" data-widget_type=\"image.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<img fetchpriority=\"high\" decoding=\"async\" width=\"768\" height=\"490\" src=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-768x490.png\" class=\"attachment-medium_large size-medium_large wp-image-7889\" alt=\"\" srcset=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-768x490.png 768w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-300x191.png 300w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-1024x654.png 1024w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-1536x980.png 1536w, 
https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-540x345.png 540w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-860x549.png 860w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG-1170x747.png 1170w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-P1-N1-N2-ENG.png 1880w\" sizes=\"(max-width: 768px) 100vw, 768px\" \/>\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-5f6668f elementor-widget elementor-widget-text-editor\" data-id=\"5f6668f\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<p class=\"TIMESSS\" style=\"text-align: center;\"><strong><span lang=\"EN-CA\">Figure 1.\u00a0<\/span><\/strong><span lang=\"EN-CA\">Diagram illustrating the P1-N1-P2 complex (<\/span><a href=\"https:\/\/commons.wikimedia.org\/wiki\/File:ComponentsofERP.svg\" target=\"_blank\" rel=\"noopener\"><span lang=\"EN-US\">ERP\u00a0<\/span><span lang=\"EN-CA\">components<\/span><\/a><span lang=\"EN-CA\">), adapted from\u00a0<\/span><a href=\"https:\/\/commons.wikimedia.org\/wiki\/User:Choms\" target=\"_blank\" rel=\"noopener\"><span lang=\"EN-CA\">Choms<\/span><\/a><span lang=\"EN-CA\">, under licence\u00a0<\/span><a href=\"https:\/\/creativecommons.org\/licenses\/by-sa\/3.0\/deed.en\" target=\"_blank\" rel=\"noopener\"><span lang=\"EN-CA\">CC BY-SA 3.0<\/span><\/a><span lang=\"EN-CA\">.<\/span><\/p><p style=\"text-align: justify;\">It is well established that, compared to auditory perception alone, adding visual articulatory information decreases the amplitude and latency of the N1\/P2 complex (e.g.\u00a0Besle\u00a0<em>et al.<\/em>, 2004; Klucharev\u00a0<em>et al<\/em>., 2003; Treille\u00a0<em>et al<\/em>., 2014a; Treille\u00a0<em>et al<\/em>., 2017; Treille\u00a0<em>et al<\/em>., 2014b; Treille\u00a0<em>et 
al.<\/em>, 2018, van Wassenhove\u00a0<em>et al<\/em>., 2005). Therefore, this complex is generally regarded as a reliable marker of audiovisual integration. In a study led by Pascale Tremblay, the laboratory director, we examined the audiovisual integration process in elderly people by measuring the P1-N1-P2 complex. Our results showed that the P1 component, which marks basic auditory processing, is not affected by age. For components N1 and P2, despite some changes, this complex, and the integration work it reflects, remains quite functional with age. However, the ability to read lips is greatly reduced (<a href=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/05\/tremblay_etal_neuropsychologia_2021.pdf\" target=\"_blank\" rel=\"noopener\">Tremblay\u00a0et al., 2021<\/a>). Other research teams have observed that the McGurk effect worked better in older compared to younger people, suggesting that the use of auditory and visual information to perceive speech changes with age (Hirst\u00a0<em>et al.<\/em>, 2018; McGurk &amp; MacDonald, 1976; Sekiyama\u00a0<em>et al.<\/em>, 2014).<\/p><p style=\"text-align: justify;\">As EEG does not provide precise information on the regions of the brain involved in audiovisual integration, several teams have used magnetic resonance imaging (<a href=\"https:\/\/speechneurolab.ca\/en\/magnetic-resonance-imaging-mri\/\">MRI<\/a>) and neurostimulation to identify the brain regions involved in this process, to complement EEG studies. For a <a href=\"https:\/\/speechneurolab.ca\/en\/meta-analysis\/\">meta-analysis<\/a> on the regions of the brain involved in the integration of audiovisual information during speech perception, see Erickson\u00a0<em>et al<\/em>. (2014a). These regions include the bilateral posterior temporal regions, including the superior temporal sulcus (STS) and the superior temporal gyrus (Beauchamp et al., 2010; Nath &amp; Beauchamp, 2012; Erickson et al., 2014a-b; see figure 2). 
For example, when the functioning of the left STS is inhibited in participants using transcranial magnetic stimulation (TMS), the likelihood of perceiving the illusion is reduced, suggesting that this region plays an important role in audiovisual integration (Beauchamp et al., 2010). In addition, neural activity within the left STS, as measured using functional\u00a0MRI, is associated with individuals\u2019 sensitivity to the McGurk effect: people with lower activity in this region are less likely to perceive the illusion (Nath &amp; Beauchamp, 2012), and thus, less capable of integrating auditory and visual information.<\/p>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-26c5515 elementor-widget elementor-widget-image\" data-id=\"26c5515\" data-element_type=\"widget\" data-widget_type=\"image.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<img decoding=\"async\" width=\"768\" height=\"490\" src=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-768x490.png\" class=\"attachment-medium_large size-medium_large wp-image-7891\" alt=\"\" srcset=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-768x490.png 768w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-300x191.png 300w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-1024x654.png 1024w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-1536x980.png 1536w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-540x345.png 540w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-860x549.png 860w, https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG-1170x747.png 1170w, 
https:\/\/speechneurolab.ca\/wp-content\/uploads\/2021\/06\/Facebook-Effet-Mc-Gurk-ENG.png 1880w\" sizes=\"(max-width: 768px) 100vw, 768px\" \/>\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-6b3fd6d elementor-widget elementor-widget-text-editor\" data-id=\"6b3fd6d\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<p class=\"TIMESSS\" style=\"text-align: center;\"><strong><span lang=\"EN-CA\">Figure 2.\u00a0<\/span><\/strong><span lang=\"EN-CA\">Localization of certain brain regions that may play a role in audiovisual integration (<a href=\"https:\/\/commons.wikimedia.org\/wiki\/File:Human-brain.SVG\" target=\"_blank\" rel=\"noopener\">human brain<\/a>), adapted from <a href=\"https:\/\/commons.wikimedia.org\/wiki\/User:James.mcd.nz\" target=\"_blank\" rel=\"noopener\">James.mcd.nz<\/a>, licensed under <a href=\"https:\/\/creativecommons.org\/licenses\/by-sa\/3.0\/deed.en\" target=\"_blank\" rel=\"noopener\">CC BY-SA 3.0<\/a>.<\/span><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">Other regions are also believed to play a role in audiovisual integration, such as the inferior frontal gyrus or IFG (Erickson et al., 2014a; see figure 2). A research team observed in participants a greater activity in the bilateral inferior frontal gyrus when the auditory and visual information was discordant (e.g. audio \/ ba \/ combined with visual \/ ga \/), in comparison to when auditory and visual information matched (e.g. audio \/ ba \/ combined with visual \/ ba \/; Murakami et al., 2018).<\/span><span lang=\"EN-CA\"> This region \u2013 the IFG \u2013 could therefore be important in resolving inconsistencies between auditory and visual information when perceiving speech. 
This hypothesis is supported by the finding that greater activity in the left IFG is associated with a lower likelihood of the illusion occurring (Murakami et al., 2018).<\/span><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">Motor areas &#8211; those that control our movements &#8211; such as the primary motor cortex or the premotor area, are also believed to be involved in audiovisual integration during speech perception (Erickson et al., 2014a, Skipper et al., 2007; see figure 2). For example, one study observed that TMS applied to the region of the motor cortex that controls the lips decreased the likelihood that the illusion would occur when auditory and visual information was discordant (Murakami et al., 2018). This study, and other similar studies (e.g. <a href=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/05\/Sato_Tremblay_Gracco_2009_BL.pdf\" target=\"_blank\" rel=\"noopener\">Sato et al., 2009<\/a>; <a href=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/05\/Tremblay_Small_2011_NMG.pdf\" target=\"_blank\" rel=\"noopener\">Tremblay &amp; Small, 2011<\/a> &#8211; see also the book chapter by <a href=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/05\/McGettigan_Tremblay_Oxford_Handbook_for_PsyArXiv.pdf\" target=\"_blank\" rel=\"noopener\">McGettigan &amp; Tremblay, 2017<\/a>), suggest that regions of the brain that are used to produce speech are also used to perceive speech, although their specific role in audiovisual integration remains unclear.<\/span><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">In short, several regions of the brain, in the temporal and frontal lobes help integrate auditory and visual information during speech perception and could help produce the McGurk effect!<\/span><\/p><p class=\"TIMESSS\" style=\"text-align: justify;\"><span lang=\"EN-CA\">Here are some other illusions to try, also reflecting the McGurk 
effect:<\/span><\/p>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-a4fc963 elementor-widget elementor-widget-video\" data-id=\"a4fc963\" data-element_type=\"widget\" data-settings=\"{&quot;video_type&quot;:&quot;hosted&quot;,&quot;controls&quot;:&quot;yes&quot;}\" data-widget_type=\"video.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t<div class=\"e-hosted-video elementor-wrapper elementor-open-inline\">\n\t\t\t\t\t<video class=\"elementor-video\" src=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/06\/blogue-effet_mcgurk_vf2.mp4\" controls=\"\" preload=\"metadata\" controlsList=\"nodownload\"><\/video>\n\t\t\t\t<\/div>\n\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-998e5d4 elementor-widget elementor-widget-video\" data-id=\"998e5d4\" data-element_type=\"widget\" data-settings=\"{&quot;video_type&quot;:&quot;hosted&quot;,&quot;controls&quot;:&quot;yes&quot;}\" data-widget_type=\"video.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t<div class=\"e-hosted-video elementor-wrapper elementor-open-inline\">\n\t\t\t\t\t<video class=\"elementor-video\" src=\"https:\/\/speechneurolab.ca\/wp-content\/uploads\/2022\/06\/blogue-effet_mcgurk_vf3.mp4\" controls=\"\" preload=\"metadata\" controlsList=\"nodownload\"><\/video>\n\t\t\t\t<\/div>\n\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-c34b7d9 elementor-widget elementor-widget-text-editor\" data-id=\"c34b7d9\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<div><p><span lang=\"EN-CA\">In these two videos, the same audio recording is used, containing the sound \/ba\/. However, the articulation (visual) differs between the two videos. 
This is why you might have heard the \/va\/ sound in the first video and the \/da\/ sound in the second, even though the aural information is actually the same (\/ba\/).<\/span><\/p><p><span lang=\"EN-CA\">\u00a0<\/span><\/p><p class=\"TIMESSS\"><span lang=\"EN-CA\">Suggested Reading:<\/span><\/p><ul><li class=\"TIMESSS\"><a href=\"https:\/\/speechneurolab.ca\/en\/the-cocktail-party-explained\/\"><span lang=\"EN-CA\">The cocktail party explained<\/span><\/a><\/li><li><a href=\"https:\/\/speechneurolab.ca\/en\/difference-between-speech-language-and-communication\/\">Difference between speech, language and communication<\/a><\/li><li><a href=\"https:\/\/speechneurolab.ca\/en\/tip-of-the-tongue\/\">Tip of the tongue<\/a><\/li><li><a href=\"https:\/\/speechneurolab.ca\/en\/speech-perception-a-complex-ability\/\">Speech perception: a complex ability<\/a><\/li><li><a href=\"https:\/\/speechneurolab.ca\/en\/publication-scientifique-sur-lintegration-audiovisuelle\/\">New scientific article on audiovisual integration<\/a><\/li><\/ul><\/div>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t<div class=\"elementor-element elementor-element-97bd600 elementor-widget elementor-widget-text-editor\" data-id=\"97bd600\" data-element_type=\"widget\" data-widget_type=\"text-editor.default\">\n\t\t\t\t<div class=\"elementor-widget-container\">\n\t\t\t\t\t\t\t\t\t<div><p class=\"TIMESSS\"><span style=\"text-align: justify;\">References:<\/span><\/p><\/div><p style=\"text-align: justify;\">Beauchamp, M. S., Nath, A. R., &amp; Pasalar, S. (2010). fMRI-guided transcranial magnetic stimulation reveals that the superior temporal sulcus is a cortical locus of the McGurk effect. The Journal of Neuroscience, 30(7), 2414\u20132417. DOI: 10.1523\/JNEUROSCI.4865-09.2010<\/p><p style=\"text-align: justify;\">Besle, J., Fort, A., Delpuech, C., &amp; Giard, M.-H. (2004). Bimodal speech: Early suppressive visual effects in human auditory cortex. 
European Journal of Neuroscience, 20(8), 2225\u20132234. DOI: 10.1111\/j.1460-9568.2004.03670.x<\/p><p style=\"text-align: justify;\">Erickson, L. C., Heeg, E., Rauschecker, J. P., &amp; Turkeltaub, P. E. (2014a). An ALE meta\u2010analysis on the audiovisual integration of speech signals. Human Brain Mapping, 35(11), 5587\u20135605. DOI: 10.1002\/hbm.22572<\/p><p style=\"text-align: justify;\">Erickson, L. C., Zielinski, B. A., Zielinski, J. E. V., Liu, G., Turkeltaub, P. E., Leaver, A. M., &amp; Rauschecker, J. P. (2014b). Distinct cortical locations for integration of audiovisual speech and the McGurk effect. Frontiers in Psychology, 5, Article 534. DOI: 10.3389\/fpsyg.2014.00534<\/p><p style=\"text-align: justify;\">Klucharev, V., M\u00f6tt\u00f6nen, R., &amp; Sams, M. (2003). Electrophysiological indicators of phonetic and non-phonetic multisensory interactions during audiovisual speech perception. Cognitive Brain Research, 18(1), 65\u201375. 10.1016\/j.cogbrainres.2003.09.004\u00a0<\/p><p style=\"text-align: justify;\">Luck, S. J. (2014). An introduction to the event-related potential technique: MIT press.<\/p><p style=\"text-align: justify;\">McGurk, H., and MacDonald, J. (1976). Hearing lips and seeing voices.\u00a0<em>Nature<\/em>\u00a0264, 746\u2013748. DOI: 10.1038\/264746a0<\/p><p style=\"text-align: justify;\">Murakami, T., Abe, M., Wiratman, W., Fujiwara, J., Okamoto, M., Mizuochi-Endo, T., Iwabuchi, T., Makuuchi, M., Yamashita, A., Tiksnadi, A., Chang, F.-Y., Kubo, H., Matsuda, N., Kobayashi, S., Eifuku, S., &amp; Ugawa, Y. (2018). The motor network reduces multisensory illusory perception. The Journal of Neuroscience, 38(45), 9679\u20139688. DOI: 10.1523\/JNEUROSCI.3650-17.2018<\/p><p style=\"text-align: justify;\">Nath, A. R., &amp; Beauchamp, M. S. (2012). A neural basis for interindividual differences in the McGurk effect, a multisensory speech illusion.\u00a0NeuroImage, 59(1), 781\u2013787. 
DOI: 10.1016\/j.neuroimage.2011.07.024<\/p><p style=\"text-align: justify;\">Sekiyama, K., Soshi, T., &amp; Sakamoto, S. (2014).\u00a0Enhanced audiovisual integration with aging in speech perception: A heightened McGurk effect in older adults. Frontiers in Psychology, 5, Article 323. DOI: 10.3389\/fpsyg.2014.00323<\/p><p style=\"text-align: justify;\">Skipper, J. I., van Wassenhove, V., Nusbaum, H. C., &amp; Small, S. L. (2007). Hearing lips and seeing voices: How cortical areas supporting speech production mediate audiovisual speech perception.\u00a0Cerebral Cortex, 17(10), 2387\u20132399. DOI: 10.1093\/cercor\/bhl147<\/p><p style=\"text-align: justify;\">Treille, A., Cordeboeuf, C., Vilain, C., &amp; Sato, M. (2014a).\u00a0Haptic and visual information speed up the neural processing of auditory speech in live dyadic interactions.\u00a0Neuropsychologia, 57, 71\u201377. DOI: 10.1016\/j.neuropsychologia.2014.02.004<\/p><p style=\"text-align: justify;\">Treille, A., Vilain, C., Kandel, S., &amp; Sato, M. (2017).\u00a0Electrophysiological evidence for a self-processing advantage during audiovisual speech integration. Experimental Brain Research, 235(9), 2867\u20132876. DOI: 10.1007\/s00221-017-5018-0<\/p><p style=\"text-align: justify;\">Treille, A., Vilain, C., &amp; Sato, M. (2014b).\u00a0The sound of your lips: Electrophysiological cross-modal interactions during hand-to-face and face-to-face speech perception.\u00a0Frontiers in Psychology, 5, Article 420. DOI: 10.3389\/fpsyg.2014.00420<\/p><p style=\"text-align: justify;\">Treille, A., Vilain, C., Schwartz, J.-L., Hueber, T., &amp; Sato, M. (2018).\u00a0Electrophysiological evidence for Audio-visuo-lingual speech integration. Neuropsychologia, 109, 126\u2013133. DOI: 10.1016\/j.neuropsychologia.2017.12.024<\/p><p style=\"text-align: justify;\">van Wassenhove, V., Grant, K. W., &amp; Poeppel, D. (2005). Visual speech speeds up the neural processing of auditory speech. 
PNAS Proceedings of the National Academy of Sciences of the United States of America, 102(4), 1181\u20131186. DOI: 10.1073\/pnas.0408949102<\/p><p style=\"text-align: justify;\">\u00a0<\/p><p style=\"text-align: justify;\">Further reading:<\/p><p style=\"text-align: justify;\">Tiippana, K. (2014) What is the McGurk effect? Frontiers in Psychology, 5, DOI:10.3389\/fpsyg.2014.00725\u00a0\u00a0\u00a0\u00a0<\/p>\t\t\t\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t\t\t<\/div>\n\t\t<\/div>\n\t\t\t\t\t<\/div>\n\t\t<\/section>\n\t\t\t\t<\/div>\n\t\t","protected":false},"excerpt":{"rendered":"<p>You like testing illusions?\u00a0We invite you to experience an illusory phenomenon linked to the perception of speech!<\/p>\n","protected":false},"author":3,"featured_media":2956,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[44],"tags":[],"ppma_author":[55,54],"class_list":["post-3595","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-vulgarisation-scientifique"],"authors":[{"term_id":55,"user_id":3,"is_guest":0,"slug":"admin-marilyne","display_name":"Marilyne Joyal","avatar_url":"https:\/\/secure.gravatar.com\/avatar\/?s=96&d=mm&r=g","author_category":"","user_url":"","last_name":"Joyal","first_name":"Marilyne","job_title":"","description":""},{"term_id":54,"user_id":2,"is_guest":0,"slug":"admin-pascale","display_name":"Pascale 
Tremblay","avatar_url":"https:\/\/secure.gravatar.com\/avatar\/ea9e5826afc1fd507cc7b89eaca37953ea310ad30088c3920137ab8e86846244?s=96&d=mm&r=g","author_category":"","user_url":"","last_name":"Tremblay","first_name":"Pascale","job_title":"","description":""}],"_links":{"self":[{"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/posts\/3595","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/users\/3"}],"replies":[{"embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/comments?post=3595"}],"version-history":[{"count":29,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/posts\/3595\/revisions"}],"predecessor-version":[{"id":9885,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/posts\/3595\/revisions\/9885"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/media\/2956"}],"wp:attachment":[{"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/media?parent=3595"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/categories?post=3595"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/tags?post=3595"},{"taxonomy":"author","embeddable":true,"href":"https:\/\/speechneurolab.ca\/en\/wp-json\/wp\/v2\/ppma_author?post=3595"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}