<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://www.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
		<id>http://www.cslt.org/mediawiki/index.php?action=history&amp;feed=atom&amp;title=Text-2015-01-28</id>
		<title>Text-2015-01-28 - Revision history</title>
		<link rel="self" type="application/atom+xml" href="http://www.cslt.org/mediawiki/index.php?action=history&amp;feed=atom&amp;title=Text-2015-01-28"/>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;action=history"/>
		<updated>2026-04-03T19:09:26Z</updated>
		<subtitle>Revision history for this page on the wiki</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13735&amp;oldid=prev</id>
		<title>Lr: /* list paper */</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13735&amp;oldid=prev"/>
				<updated>2015-01-28T02:08:37Z</updated>
		
		<summary type="html">&lt;p&gt;‎&lt;span dir=&quot;auto&quot;&gt;&lt;span class=&quot;autocomment&quot;&gt;list paper&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;← Older revision&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;Revision as of 02:08, 28 January 2015&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 13:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 13:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=list paper=&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=list paper=&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;* deep learning bibliography[http://memkite.com/deep-learning-bibliography/]&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;* deep learning and representation learning workshop (NIPS)&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Deep Learning and Representation Learning Workshop: NIPS 2014 --Accepted papers==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Deep Learning and Representation Learning Workshop: NIPS 2014 --Accepted papers==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;*Oral presentations:&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;*Oral presentations:&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Lr</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13734&amp;oldid=prev</id>
		<title>Lr: /* ready to share paper */</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13734&amp;oldid=prev"/>
				<updated>2015-01-28T02:03:16Z</updated>
		
		<summary type="html">&lt;p&gt;‎&lt;span dir=&quot;auto&quot;&gt;&lt;span class=&quot;autocomment&quot;&gt;ready to share paper&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;← Older revision&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;Revision as of 02:03, 28 January 2015&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 2:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 2:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;* '''Deep Learning for Answer Sentence Selection'''[http://arxiv.org/pdf/1412.1632v1.pdf] (#36)Lei Yu, Karl Moritz Hermann, Phil Blunsom, Stephen Pulman('''Tianyi Luo''')&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;* '''Deep Learning for Answer Sentence Selection'''[http://arxiv.org/pdf/1412.1632v1.pdf] (#36)Lei Yu, Karl Moritz Hermann, Phil Blunsom, Stephen Pulman('''Tianyi Luo''')&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;*'''Retrofitting Word Vectors to Semantic Lexicons '''(#34)Manaal Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, Noah Smith('''Chaos''')&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;*'''Retrofitting Word Vectors to Semantic Lexicons '''(#34)Manaal Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, Noah Smith('''Chaos''')&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;* A Clockwork RNN [http://arxiv.org/pdf/1402.3511v1.pdf] (WXX)&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==choose paper==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==choose paper==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Lr</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13686&amp;oldid=prev</id>
		<title>07:24, 26 January 2015 Lr</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13686&amp;oldid=prev"/>
				<updated>2015-01-26T07:24:21Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;← Older revision&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;Revision as of 07:24, 26 January 2015&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 1:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 1:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==ready to share paper==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==ready to share paper==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;* '''Deep Learning for Answer Sentence Selection'''[http://arxiv.org/pdf/1412.1632v1.pdf] (#36)Lei Yu, Karl Moritz Hermann, Phil Blunsom, Stephen Pulman('''Tianyi Luo''')&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;*'''Retrofitting Word Vectors to Semantic Lexicons '''(#34)Manaal Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, Noah Smith('''Chaos''')&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==choose paper==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==choose paper==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Lr</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13685&amp;oldid=prev</id>
		<title>Lr: Created page with "==ready to share paper==  ==choose paper== * '''Document Embedding with Paragraph Vectors'''[http://125.178.23.34/wp-content/uploads/2014/12/Document-Embedding-with-..."</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php?title=Text-2015-01-28&amp;diff=13685&amp;oldid=prev"/>
				<updated>2015-01-26T07:23:38Z</updated>
		
		<summary type="html">&lt;p&gt;以“==ready to share paper==  ==choose paper== * &amp;#039;&amp;#039;&amp;#039;Document Embedding with Paragraph Vectors&amp;#039;&amp;#039;&amp;#039;[http://125.178.23.34/wp-content/uploads/2014/12/Document-Embedding-with-...”为内容创建页面&lt;/p&gt;
&lt;p&gt;&lt;b&gt;New page&lt;/b&gt;&lt;/p&gt;&lt;div&gt;==ready to share paper==&lt;br /&gt;
&lt;br /&gt;
==choose paper==&lt;br /&gt;
* '''Document Embedding with Paragraph Vectors'''[http://125.178.23.34/wp-content/uploads/2014/12/Document-Embedding-with-Paragraph-Vectors.pdf] (#68)Andrew Dai, Christopher Olah, Quoc Le, Greg Corrado ('''Rong Liu''')&lt;br /&gt;
* '''Deep Learning for Answer Sentence Selection'''[http://arxiv.org/pdf/1412.1632v1.pdf] (#36)Lei Yu, Karl Moritz Hermann, Phil Blunsom, Stephen Pulman('''Tianyi Luo''')&lt;br /&gt;
*'''Retrofitting Word Vectors to Semantic Lexicons '''(#34)Manaal Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, Noah Smith('''Chaos''')&lt;br /&gt;
*'''Autoencoder Trees '''(#5)Ozan Irsoy, Ethem Alpaydin('''Xi Ma''') &lt;br /&gt;
*A Winner-Take-All Method for Training Sparse Convolutional Autoencoders (#10)Alireza Makhzani, Brendan Frey ('''Shallsee''')&lt;br /&gt;
*Understanding Locally Competitive Networks (#15)Rupesh Srivastava, Jonathan Masci, Faustino Gomez, Jurgen Schmidhuber ('''Shallsee''')&lt;br /&gt;
&lt;br /&gt;
=list paper=&lt;br /&gt;
==Deep Learning and Representation Learning Workshop: NIPS 2014 --Accepted papers==&lt;br /&gt;
*Oral presentations:&lt;br /&gt;
&lt;br /&gt;
cuDNN: Efficient Primitives for Deep Learning (#49)Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, Evan Shelhamer&lt;br /&gt;
&lt;br /&gt;
Distilling the Knowledge in a Neural Network (#65)Geoffrey Hinton, Oriol Vinyals, Jeff Dean&lt;br /&gt;
&lt;br /&gt;
Supervised Learning in Dynamic Bayesian Networks (#54)Shamim Nemati, Ryan Adams&lt;br /&gt;
&lt;br /&gt;
Deeply-Supervised Nets (#2)Chen-Yu Lee, Saining Xie, Patrick Gallagher, Zhengyou Zhang, Zhuowen Tu&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
*Posters, morning session (11:30-14:45):&lt;br /&gt;
&lt;br /&gt;
Unsupervised Feature Learning from Temporal Data (#3)Ross Goroshin, Joan Bruna, Arthur Szlam, Jonathan Tompson, David Eigen, Yann LeCun&lt;br /&gt;
&lt;br /&gt;
Autoencoder Trees (#5)Ozan Irsoy, Ethem Alpaydin&lt;br /&gt;
&lt;br /&gt;
Scheduled denoising autoencoders (#6)Krzysztof Geras, Charles Sutton&lt;br /&gt;
&lt;br /&gt;
Learning to Deblur (#8)Christian Schuler, Michael Hirsch, Stefan Harmeling, Bernhard Schölkopf&lt;br /&gt;
&lt;br /&gt;
A Winner-Take-All Method for Training Sparse Convolutional Autoencoders (#10)Alireza Makhzani, Brendan Frey&lt;br /&gt;
&lt;br /&gt;
&amp;quot;Mental Rotation&amp;quot; by Optimizing Transforming Distance (#11)Weiguang Ding, Graham Taylor&lt;br /&gt;
&lt;br /&gt;
On Importance of Base Model Covariance for Annealing Gaussian RBMs (#12)Taichi Kiwaki, Kazuyuki Aihara&lt;br /&gt;
&lt;br /&gt;
Ultrasound Standard Plane Localization via Spatio-Temporal Feature Learning with Knowledge Transfer (#14)Hao Chen, Dong Ni, Ling Wu, Sheng Li, Pheng Heng&lt;br /&gt;
&lt;br /&gt;
Understanding Locally Competitive Networks (#15)Rupesh Srivastava, Jonathan Masci, Faustino Gomez, Jurgen Schmidhuber&lt;br /&gt;
&lt;br /&gt;
Unsupervised pre-training speeds up the search for good features: an analysis of a simplified model of neural network learning (#18)Avraham Ruderman&lt;br /&gt;
&lt;br /&gt;
Analyzing Feature Extraction by Contrastive Divergence Learning in RBMs (#19)Ryo Karakida, Masato Okada, Shun-ichi Amari&lt;br /&gt;
&lt;br /&gt;
Deep Tempering (#20)Guillaume Desjardins, Heng Luo, Aaron Courville, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
Learning Word Representations with Hierarchical Sparse Coding (#21)Dani Yogatama, Manaal Faruqui, Chris Dyer, Noah Smith&lt;br /&gt;
&lt;br /&gt;
Deep Learning as an Opportunity in Virtual Screening (#23)Thomas Unterthiner, Andreas Mayr, Günter Klambauer, Marvin Steijaert, Jörg Wenger, Hugo Ceulemans, Sepp Hochreiter&lt;br /&gt;
&lt;br /&gt;
Revisit Long Short-Term Memory: An Optimization Perspective (#24)Qi Lyu, J Zhu&lt;br /&gt;
&lt;br /&gt;
Locally Scale-Invariant Convolutional Neural Networks (#26)Angjoo Kanazawa, David Jacobs, Abhishek Sharma&lt;br /&gt;
&lt;br /&gt;
Deep Exponential Families (#28)Rajesh Ranganath, Linpeng Tang, Laurent Charlin, David Blei&lt;br /&gt;
&lt;br /&gt;
Techniques for Learning Binary Stochastic Feedforward Neural Networks (#29)Tapani Raiko, mathias Berglund, Guillaume Alain, Laurent Dinh&lt;br /&gt;
&lt;br /&gt;
Inside-Outside Semantics: A Framework for Neural Models of Semantic Composition (#30)Phong Le, Willem Zuidema&lt;br /&gt;
&lt;br /&gt;
Deep Multi-Instance Transfer Learning (#32)Dimitrios Kotzias, Misha Denil, Phil Blunsom, Nando De Freitas&lt;br /&gt;
&lt;br /&gt;
Unifying Visual-Semantic Embeddings with Multimodal Neural Language Models (#33)Ryan Kiros, Ruslan Salakhutdinov, Richard Zemel&lt;br /&gt;
&lt;br /&gt;
'''Retrofitting Word Vectors to Semantic Lexicons '''(#34)Manaal Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, Noah Smith&lt;br /&gt;
&lt;br /&gt;
Deep Sequential Neural Network (#35)Ludovic Denoyer, Patrick Gallinari&lt;br /&gt;
&lt;br /&gt;
Efficient Training Strategies for Deep Neural Network Language Models (#71)Holger Schwenk&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
*Posters, afternoon session (17:00-18:30):&lt;br /&gt;
&lt;br /&gt;
'''Deep Learning for Answer Sentence Selection''' (#36)Lei Yu, Karl Moritz Hermann, Phil Blunsom, Stephen Pulman&lt;br /&gt;
&lt;br /&gt;
Synthetic Data and Artificial Neural Networks for Natural Scene Text Recognition (#37)Max Jaderberg, Karen Simonyan, Andrea Vedaldi, Andrew Zisserman&lt;br /&gt;
&lt;br /&gt;
Learning Torque-Driven Manipulation Primitives with a Multilayer Neural Network (#39)Sergey Levine, Pieter Abbeel&lt;br /&gt;
&lt;br /&gt;
SimNets: A Generalization of Convolutional Networks (#41)Nadav Cohen, Amnon Shashua&lt;br /&gt;
&lt;br /&gt;
Phonetics embedding learning with side information (#44)Gabriel Synnaeve, Thomas Schatz, Emmanuel Dupoux&lt;br /&gt;
&lt;br /&gt;
End-to-end Continuous Speech Recognition using Attention-based Recurrent NN: First Results (#45)Jan Chorowski, Dzmitry Bahdanau, KyungHyun Cho, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
BILBOWA: Fast Bilingual Distributed Representations without Word Alignments (#46)Stephan Gouws, Yoshua Bengio, Greg Corrado&lt;br /&gt;
&lt;br /&gt;
Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling (#47)Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
Reweighted Wake-Sleep (#48)Jorg Bornschein, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
Explain Images with Multimodal Recurrent Neural Networks (#51)Junhua Mao, Wei Xu, Yi Yang, Jiang Wang, Alan Yuille&lt;br /&gt;
&lt;br /&gt;
Rectified Factor Networks and Dropout (#53)Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter&lt;br /&gt;
&lt;br /&gt;
Towards Deep Neural Network Architectures Robust to Adversarials (#55)Shixiang Gu, Luca Rigazio&lt;br /&gt;
&lt;br /&gt;
Making Dropout Invariant to Transformations of Activation Functions and Inputs (#56)Jimmy Ba, Hui Yuan Xiong, Brendan Frey&lt;br /&gt;
&lt;br /&gt;
Aspect Specific Sentiment Analysis using Hierarchical Deep Learning (#58)Himabindu Lakkaraju, Richard Socher, Chris Manning&lt;br /&gt;
&lt;br /&gt;
Deep Directed Generative Autoencoders (#59)Sherjil Ozair, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
Conditional Generative Adversarial Nets (#60)Mehdi Mirza, Simon Osindero&lt;br /&gt;
&lt;br /&gt;
Analyzing the Dynamics of Gated Auto-encoders (#61)Daniel Im, Graham Taylor&lt;br /&gt;
&lt;br /&gt;
Representation as a Service (#63)Ouais Alsharif, Joelle Pineau, philip bachman&lt;br /&gt;
&lt;br /&gt;
Provable Methods for Training Neural Networks with Sparse Connectivity (#66)Hanie Sedghi, Anima Anandkumar&lt;br /&gt;
&lt;br /&gt;
Trust Region Policy Optimization (#67)John D. Schulman, Philipp C. Moritz, Sergey Levine, Michael I. Jordan, Pieter Abbeel&lt;br /&gt;
&lt;br /&gt;
'''Document Embedding with Paragraph Vectors''' (#68)Andrew Dai, Christopher Olah, Quoc Le, Greg Corrado&lt;br /&gt;
&lt;br /&gt;
Backprop-Free Auto-Encoders (#69)Dong-Hyun Lee, Yoshua Bengio&lt;br /&gt;
&lt;br /&gt;
Rate-Distortion Auto-Encoders (#73)Luis Sanchez Giraldo, Jose Principe&lt;/div&gt;</summary>
		<author><name>Lr</name></author>	</entry>

	</feed>