<?xml version="1.0" encoding="UTF-8"?>
<!-- このサイトマップは、2026-04-03の18:25に、WordPress 用のオリジナル SEO プラグイン All in One SEO v4.4.7.1により動的生成されました。 -->

<?xml-stylesheet type="text/xsl" href="http://www.mlab.phys.waseda.ac.jp/default-sitemap.xsl"?>

<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
	<channel>
		<title>森島研究室</title>
		<link><![CDATA[http://www.mlab.phys.waseda.ac.jp]]></link>
		<description><![CDATA[森島研究室]]></description>
		<lastBuildDate><![CDATA[Tue, 20 Oct 2020 00:02:06 +0000]]></lastBuildDate>
		<docs>https://validator.w3.org/feed/docs/rss2.html</docs>
		<atom:link href="http://www.mlab.phys.waseda.ac.jp/sitemap.rss" rel="self" type="application/rss+xml" />
		<ttl><![CDATA[60]]></ttl>

		<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linsss-linear-decomposition-of-heterogeneous-subsurface-scattering-for-real-time-screen-space-rendering/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linsss-linear-decomposition-of-heterogeneous-subsurface-scattering-for-real-time-screen-space-rendering/]]></link>
			<title>LinSSS: linear decomposition of heterogeneous subsurface scattering for real-time screen-space rendering</title>
			<pubDate><![CDATA[Tue, 20 Oct 2020 00:02:06 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/audio-visual-object-removal-in-360-degree-videos/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/audio-visual-object-removal-in-360-degree-videos/?lang=en]]></link>
			<title>Audio-Visual Object Removal in 360-Degree Videos</title>
			<pubDate><![CDATA[Tue, 20 Oct 2020 00:02:05 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio-2/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio-2/?lang=en]]></link>
			<title>Song2Face: Synthesizing Singing Facial Animation from Audio</title>
			<pubDate><![CDATA[Tue, 01 Dec 2020 00:00:37 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-%e8%a6%96%e8%a6%9a%e9%9a%9c%e7%a2%8d%e8%80%85%e3%81%8c%e5%88%97%e3%81%ab%e4%b8%a6%e3%81%b6%e3%81%9f%e3%82%81%e3%81%ae%e3%82%b9%e3%83%9e%e3%83%bc%e3%83%88%e3%83%95%e3%82%a9%e3%83%b3/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-%e8%a6%96%e8%a6%9a%e9%9a%9c%e7%a2%8d%e8%80%85%e3%81%8c%e5%88%97%e3%81%ab%e4%b8%a6%e3%81%b6%e3%81%9f%e3%82%81%e3%81%ae%e3%82%b9%e3%83%9e%e3%83%bc%e3%83%88%e3%83%95%e3%82%a9%e3%83%b3/]]></link>
			<title>LineChaser: 視覚障碍者が列に並ぶためのスマートフォン型支援システム</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:29:52 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-3/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-3/]]></link>
			<title>Visual Computing 2020</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:24:54 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/wiss-2020-%e7%ac%ac28%e5%9b%9e%e3%82%a4%e3%83%b3%e3%82%bf%e3%83%a9%e3%82%af%e3%83%86%e3%82%a3%e3%83%96%e3%82%b7%e3%82%b9%e3%83%86%e3%83%a0%e3%81%a8%e3%82%bd%e3%83%95%e3%83%88%e3%82%a6%e3%82%a7/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/wiss-2020-%e7%ac%ac28%e5%9b%9e%e3%82%a4%e3%83%b3%e3%82%bf%e3%83%a9%e3%82%af%e3%83%86%e3%82%a3%e3%83%96%e3%82%b7%e3%82%b9%e3%83%86%e3%83%a0%e3%81%a8%e3%82%bd%e3%83%95%e3%83%88%e3%82%a6%e3%82%a7/]]></link>
			<title>WISS 2020: 第28回インタラクティブシステムとソフトウェアに関するワークショップ</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:21:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/asynchronous-eulerian-liquid-simulation-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/asynchronous-eulerian-liquid-simulation-2/]]></link>
			<title>Asynchronous Eulerian Liquid Simulation</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:19:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-2/]]></link>
			<title>Visual Computing 2020</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:17:56 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/gpu-smoke-simulation-with-adaptive-dct-compression/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/gpu-smoke-simulation-with-adaptive-dct-compression/]]></link>
			<title>GPU Smoke Simulation with Adaptive DCT Compression</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:17:26 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020/]]></link>
			<title>Visual Computing 2020</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:15:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/%e7%ac%ac23%e5%9b%9e-%e7%94%bb%e5%83%8f%e3%81%ae%e8%aa%8d%e8%ad%98%e3%83%bb%e7%90%86%e8%a7%a3%e3%82%b7%e3%83%b3%e3%83%9d%e3%82%b8%e3%82%a6%e3%83%a0%ef%bc%88miru2020%ef%bc%89-3/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/%e7%ac%ac23%e5%9b%9e-%e7%94%bb%e5%83%8f%e3%81%ae%e8%aa%8d%e8%ad%98%e3%83%bb%e7%90%86%e8%a7%a3%e3%82%b7%e3%83%b3%e3%83%9d%e3%82%b8%e3%82%a6%e3%83%a0%ef%bc%88miru2020%ef%bc%89-3/]]></link>
			<title>第23回 画像の認識・理解シンポジウム（MIRU2020）</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:10:46 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/%e7%ac%ac23%e5%9b%9e-%e7%94%bb%e5%83%8f%e3%81%ae%e8%aa%8d%e8%ad%98%e3%83%bb%e7%90%86%e8%a7%a3%e3%82%b7%e3%83%b3%e3%83%9d%e3%82%b8%e3%82%a6%e3%83%a0%ef%bc%88miru2020%ef%bc%89-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/%e7%ac%ac23%e5%9b%9e-%e7%94%bb%e5%83%8f%e3%81%ae%e8%aa%8d%e8%ad%98%e3%83%bb%e7%90%86%e8%a7%a3%e3%82%b7%e3%83%b3%e3%83%9d%e3%82%b8%e3%82%a6%e3%83%a0%ef%bc%88miru2020%ef%bc%89-2/]]></link>
			<title>第23回 画像の認識・理解シンポジウム（MIRU2020）</title>
			<pubDate><![CDATA[Thu, 19 Nov 2020 08:08:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-a-smartphone-based-navigation-system-for-blind-people-to-stand-in-lines/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-a-smartphone-based-navigation-system-for-blind-people-to-stand-in-lines/?lang=en]]></link>
			<title>LineChaser: A Smartphone-Based Navigation System for Blind People to Stand in Lines</title>
			<pubDate><![CDATA[Thu, 13 May 2021 00:02:21 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/%e3%82%b3%e3%83%b3%e3%82%bf%e3%82%af%e3%83%88/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/%e3%82%b3%e3%83%b3%e3%82%bf%e3%82%af%e3%83%88/]]></link>
			<title>コンタクト</title>
			<pubDate><![CDATA[Thu, 12 Oct 2023 08:30:30 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio/?lang=en]]></link>
			<title>Song2Face: Synthesizing Singing Facial Animation from Audio</title>
			<pubDate><![CDATA[Thu, 10 Dec 2020 00:00:05 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/computer-vision-accv-2020/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/computer-vision-accv-2020/]]></link>
			<title>Computer Vision – ACCV 2020</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 05:07:43 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/computer-vision-accv-2020/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/computer-vision-accv-2020/?lang=en]]></link>
			<title>Computer Vision – ACCV 2020</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 05:07:43 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/siggraph-asia-2020-technical-communications/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/siggraph-asia-2020-technical-communications/]]></link>
			<title>SIGGRAPH Asia 2020 Technical Communications</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 05:02:30 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/siggraph-asia-2020-technical-communications/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/siggraph-asia-2020-technical-communications/?lang=en]]></link>
			<title>SIGGRAPH Asia 2020 Technical Communications</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 05:02:30 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio-2/]]></link>
			<title>Song2Face: Synthesizing Singing Facial Animation from Audio</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:58:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization-2/]]></link>
			<title>Do We Need Sound for Sound Source Localization?</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:56:29 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/asian-conference-on-computer-vision-2020-accv2020/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/asian-conference-on-computer-vision-2020-accv2020/]]></link>
			<title>Asian Conference on Computer Vision 2020 (ACCV2020)</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:56:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/asian-conference-on-computer-vision-2020-accv2020/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/asian-conference-on-computer-vision-2020-accv2020/?lang=en]]></link>
			<title>Asian Conference on Computer Vision 2020 (ACCV2020)</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:56:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/4217/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/4217/?lang=en]]></link>
			<title>CHI Conference on Human Factors in Computing Systems (CHI &#8217;21)</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:55:28 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/journal-of-information-processing-2/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/journal-of-information-processing-2/?lang=en]]></link>
			<title>Journal of Information Processing</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:54:58 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/journal-of-information-processing-2/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/journal-of-information-processing-2/]]></link>
			<title>Journal of Information Processing</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:54:58 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/25th-international-conference-on-pattern-recognition-icpr2020/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/25th-international-conference-on-pattern-recognition-icpr2020/?lang=en]]></link>
			<title>25th International Conference on Pattern Recognition (ICPR2020)</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:53:46 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/25th-international-conference-on-pattern-recognition-icpr2020/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/25th-international-conference-on-pattern-recognition-icpr2020/]]></link>
			<title>25th International Conference on Pattern Recognition (ICPR2020)</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:53:46 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/siggraph-asia-2020/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/siggraph-asia-2020/]]></link>
			<title>SIGGRAPH Asia 2020</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:53:06 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/siggraph-asia-2020/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news-en/siggraph-asia-2020/?lang=en]]></link>
			<title>SIGGRAPH Asia 2020</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:53:06 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/mirrornet-a-deep-reflective-approach-to-2d-pose-estimation-for-single-person-images/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/mirrornet-a-deep-reflective-approach-to-2d-pose-estimation-for-single-person-images/]]></link>
			<title>MirrorNet: A Deep Reflective Approach to 2D Pose Estimation for Single-Person Images</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:47:29 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/song2face-synthesizing-singing-facial-animation-from-audio/]]></link>
			<title>Song2Face: Synthesizing Singing Facial Animation from Audio</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:43:16 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization/]]></link>
			<title>Do We Need Sound for Sound Source Localization?</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:40:56 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-a-smartphone-based-navigation-system-for-blind-people-to-stand-in-lines/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/linechaser-a-smartphone-based-navigation-system-for-blind-people-to-stand-in-lines/]]></link>
			<title>LineChaser: A Smartphone-Based Navigation System for Blind People to Stand in Lines</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:36:34 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/object-oriented-representation-learning%e3%81%ae%e5%ae%9f%e4%b8%96%e7%95%8c%e3%83%87%e3%83%bc%e3%82%bf%e9%81%a9%e7%94%a8%e3%81%ab%e5%90%91%e3%81%91%e3%81%9f%e6%9c%80%e6%96%b0%e6%89%8b%e6%b3%95/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/object-oriented-representation-learning%e3%81%ae%e5%ae%9f%e4%b8%96%e7%95%8c%e3%83%87%e3%83%bc%e3%82%bf%e9%81%a9%e7%94%a8%e3%81%ab%e5%90%91%e3%81%91%e3%81%9f%e6%9c%80%e6%96%b0%e6%89%8b%e6%b3%95/]]></link>
			<title>Object-oriented Representation Learningの実世界データ適用に向けた最新手法の性能分析</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:35:04 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-4/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/visual-computing-2020-4/]]></link>
			<title>Visual Computing 2020</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:25:09 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/adversarial-knowledge-distillation-for-a-compact-generator/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/adversarial-knowledge-distillation-for-a-compact-generator/]]></link>
			<title>Adversarial Knowledge Distillation for a Compact Generator</title>
			<pubDate><![CDATA[Sun, 28 Feb 2021 04:20:03 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/blindpilot-a-robotic-local-navigation-system-that-leads-blind-people-to-a-landmark-object/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/blindpilot-a-robotic-local-navigation-system-that-leads-blind-people-to-a-landmark-object/]]></link>
			<title>BlindPilot: A Robotic Local Navigation System that Leads Blind People to a Landmark Object</title>
			<pubDate><![CDATA[Sun, 18 Oct 2020 14:08:13 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/blindpilot-a-robotic-local-navigation-system-that-leads-blind-people-to-a-landmark-object/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/blindpilot-a-robotic-local-navigation-system-that-leads-blind-people-to-a-landmark-object/?lang=en]]></link>
			<title>BlindPilot: A Robotic Local Navigation System that Leads Blind People to a Landmark Object</title>
			<pubDate><![CDATA[Sun, 18 Oct 2020 14:08:13 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/team/shigeo-morishima-2/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/team/shigeo-morishima-2/?lang=en]]></link>
			<title>Shigeo MORISHIMA</title>
			<pubDate><![CDATA[Sun, 18 Apr 2021 14:49:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/team/%e6%a3%ae%e5%b3%b6-%e7%b9%81%e7%94%9f/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/team/%e6%a3%ae%e5%b3%b6-%e7%b9%81%e7%94%9f/]]></link>
			<title>森島 繁生</title>
			<pubDate><![CDATA[Sun, 18 Apr 2021 14:46:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/adversarial-knowledge-distillation-for-a-compact-generator/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/adversarial-knowledge-distillation-for-a-compact-generator/?lang=en]]></link>
			<title>Adversarial Knowledge Distillation for a Compact Generator</title>
			<pubDate><![CDATA[Sun, 10 Jan 2021 00:00:14 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/chi-conference-on-human-factors-in-computing-systems-chi21/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/news/chi-conference-on-human-factors-in-computing-systems-chi21/]]></link>
			<title>CHI Conference on Human Factors in Computing Systems (CHI &#8217;21)</title>
			<pubDate><![CDATA[Sat, 10 Apr 2021 13:50:48 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/mirrornet-a-deep-reflective-approach-to-2d-pose-estimation-for-single-person-images/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/mirrornet-a-deep-reflective-approach-to-2d-pose-estimation-for-single-person-images/?lang=en]]></link>
			<title>MirrorNet: A Deep Reflective Approach to 2D Pose Estimation for Single-Person Images</title>
			<pubDate><![CDATA[Sat, 01 May 2021 00:01:14 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization/?lang=en]]></link>
			<title>Do We Need Sound for Sound Source Localization?</title>
			<pubDate><![CDATA[Mon, 30 Nov 2020 00:00:59 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/%e9%9f%b3%e3%81%a8%e7%94%bb%e5%83%8f%e3%82%92%e7%94%a8%e3%81%84%e3%81%9f%e6%bc%94%e5%a5%8f%e3%81%ae%e9%9f%b3%e3%81%ab%e9%80%a3%e5%8b%95%e3%81%97%e3%81%9f%e5%8b%95%e7%94%bb%e7%94%9f%e6%88%90/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/%e9%9f%b3%e3%81%a8%e7%94%bb%e5%83%8f%e3%82%92%e7%94%a8%e3%81%84%e3%81%9f%e6%bc%94%e5%a5%8f%e3%81%ae%e9%9f%b3%e3%81%ab%e9%80%a3%e5%8b%95%e3%81%97%e3%81%9f%e5%8b%95%e7%94%bb%e7%94%9f%e6%88%90/]]></link>
			<title>音と画像を用いた演奏の音に連動した動画生成</title>
			<pubDate><![CDATA[Mon, 19 Oct 2020 12:14:39 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/object-aware%e8%a1%a8%e7%8f%be%e5%ad%a6%e7%bf%92%e3%81%ae%e5%ae%89%e5%ae%9a%e5%8c%96%e3%81%ae%e3%81%9f%e3%82%81%e3%81%aekl%e3%83%80%e3%82%a4%e3%83%90%e3%83%bc%e3%82%b8%e3%82%a7%e3%83%b3%e3%82%b9/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/object-aware%e8%a1%a8%e7%8f%be%e5%ad%a6%e7%bf%92%e3%81%ae%e5%ae%89%e5%ae%9a%e5%8c%96%e3%81%ae%e3%81%9f%e3%82%81%e3%81%aekl%e3%83%80%e3%82%a4%e3%83%90%e3%83%bc%e3%82%b8%e3%82%a7%e3%83%b3%e3%82%b9/]]></link>
			<title>Object-aware表現学習の安定化のためのKLダイバージェンスの周期性アニーリング</title>
			<pubDate><![CDATA[Mon, 19 Oct 2020 12:11:18 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization-2/?lang=en]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/do-we-need-sound-for-sound-source-localization-2/?lang=en]]></link>
			<title>Do We Need Sound for Sound Source Localization?</title>
			<pubDate><![CDATA[Mon, 01 Feb 2021 00:00:46 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/]]></link>
			<title>ホーム</title>
			<pubDate><![CDATA[Fri, 29 Jan 2021 07:31:51 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/style-controllable-facial-animation-synthesis-from-singing-audio/]]></guid>
			<link><![CDATA[http://www.mlab.phys.waseda.ac.jp/portfolio/style-controllable-facial-animation-synthesis-from-singing-audio/]]></link>
			<title>Style Controllable Facial Animation Synthesis from Singing Audio</title>
			<pubDate><![CDATA[Fri, 20 Nov 2020 06:17:15 +0000]]></pubDate>
		</item>
				</channel>
</rss>
