<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://www.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://www.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Kangxf</id>
		<title>cslt Wiki - 用户贡献 [zh-cn]</title>
		<link rel="self" type="application/atom+xml" href="http://www.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Kangxf"/>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E7%89%B9%E6%AE%8A:%E7%94%A8%E6%88%B7%E8%B4%A1%E7%8C%AE/Kangxf"/>
		<updated>2026-04-04T10:42:31Z</updated>
		<subtitle>用户贡献</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Throat.png</id>
		<title>文件:Throat.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Throat.png"/>
				<updated>2017-10-29T17:34:31Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Nose.png</id>
		<title>文件:Nose.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Nose.png"/>
				<updated>2017-10-22T21:39:10Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_throat.png</id>
		<title>文件:171022 throat.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_throat.png"/>
				<updated>2017-10-22T21:38:29Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_zaza.png</id>
		<title>文件:171022 zaza.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_zaza.png"/>
				<updated>2017-10-22T21:37:41Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_zeze.png</id>
		<title>文件:171022 zeze.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_zeze.png"/>
				<updated>2017-10-22T21:36:20Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_en.png</id>
		<title>文件:171022 en.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_en.png"/>
				<updated>2017-10-22T21:35:32Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_Laugh.png</id>
		<title>文件:171022 Laugh.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_Laugh.png"/>
				<updated>2017-10-22T21:34:01Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_Cough.png</id>
		<title>文件:171022 Cough.png</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:171022_Cough.png"/>
				<updated>2017-10-22T21:32:39Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-25</id>
		<title>ASR Status Report 2017-9-25</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-25"/>
				<updated>2017-09-26T05:54:57Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.9.25&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
*Improve and release the Recording APP of Android and IOS&lt;br /&gt;
*Record about 100 people audios with zhangmiao&lt;br /&gt;
|| &lt;br /&gt;
*Record the audios with zhangmiao&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* apply fake Lid into Gsoft max model chinese and english can be decoded but kazak uyghur and tibetan can't &lt;br /&gt;
* config wolf05&lt;br /&gt;
|| &lt;br /&gt;
* make fake Lid work&lt;br /&gt;
* submit apsipa paper&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* PTN experiment continued,[http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=636]&lt;br /&gt;
||&lt;br /&gt;
* Long term PTN.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
*Test and improve the IOS APP for recording audios.&lt;br /&gt;
*Finish the experiment to test the machine error rate,the result is in my cvss [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=kangxf&amp;amp;step=view_request&amp;amp;cvssid=629 here] .&lt;br /&gt;
|| &lt;br /&gt;
*Record the audios with zhangmiao using the money from wang.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Finish human test website&lt;br /&gt;
* Design recording app with Kangxf&lt;br /&gt;
* T-SNE analysis&lt;br /&gt;
|| &lt;br /&gt;
* Absent for school class&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Implementation of node-pruning.&lt;br /&gt;
* comparison of connection-pruning and node-pruning, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangyanqing&amp;amp;step=view_request&amp;amp;cvssid=634 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on relationship and comparison of connection-pruning and node-pruning.&lt;br /&gt;
* Implementation of long-term dropout and experiments based on it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* group-based softmax finished [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* multi-decoding for group-based softmax (in progress)&lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding for group-based softmax&lt;br /&gt;
* PTN &lt;br /&gt;
* apply Lid for group-based softmax&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615]&lt;br /&gt;
** Make some smooth tricks (Silence limits [MDR] and window-based smooth [FAR]).&lt;br /&gt;
** R.T. test.&lt;br /&gt;
* Music / Noise detection, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=624]&lt;br /&gt;
||&lt;br /&gt;
* Package the code for speaker segmentation.&lt;br /&gt;
* Go on music / noise detection tasks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Part theoretical study of mispronunciation detection.&lt;br /&gt;
* Toolbook writing.&lt;br /&gt;
||&lt;br /&gt;
* Experiments on phonetic LID.&lt;br /&gt;
* Experiments on mispronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18</id>
		<title>ASR Status Report 2017-9-18</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18"/>
				<updated>2017-09-18T05:53:40Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
*Test and improve the IOS APP for recording audios.&lt;br /&gt;
*Finish the experiment to test the machine error rate,the result is in my cvss [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=kangxf&amp;amp;step=view_request&amp;amp;cvssid=629 here] .&lt;br /&gt;
|| &lt;br /&gt;
*Record the audios with zhangmiao using the money from wang.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Implementation of node-pruning.&lt;br /&gt;
* comparison of connection-pruning and node-pruning, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangyanqing&amp;amp;step=view_request&amp;amp;cvssid=634 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on relationship and comparison of connection-pruning and node-pruning.&lt;br /&gt;
* Implementation of long-term dropout and experiments based on it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* group-based softmax finished [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* multi-decoding for group-based softmax (in progress)&lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding for group-based softmax&lt;br /&gt;
* PTN &lt;br /&gt;
* apply Lid for group-based softmax&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615]&lt;br /&gt;
** Make some smooth tricks (Silence limits [MDR] and window-based smooth [FAR]).&lt;br /&gt;
** R.T. test.&lt;br /&gt;
* Music / Noise detection, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=624]&lt;br /&gt;
||&lt;br /&gt;
* Package the code for speaker segmentation.&lt;br /&gt;
* Go on music / noise detection tasks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Part theoretical study of mispronunciation detection.&lt;br /&gt;
* Toolbook writing.&lt;br /&gt;
||&lt;br /&gt;
* Experiments on phonetic LID.&lt;br /&gt;
* Experiments on mispronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Got phonetic feat from a stronger phonetic network&lt;br /&gt;
*Finished part of the experiment using stronger phonetic feature. &lt;br /&gt;
||&lt;br /&gt;
*Will be absent for school.&lt;br /&gt;
*But I will finish the remaining experiment.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website：, save the test recordings, decline the positive samples&lt;br /&gt;
* Recording and cutting the audios, a total of 12 groups&lt;br /&gt;
|| &lt;br /&gt;
* Continue to record the audios with zhangmiao&lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test&lt;br /&gt;
* Record some other people and do the experiments again&lt;br /&gt;
|| &lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
* Recording(the goal is to record 400 to 500 people) [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/cc/录音说明.pdf here]&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding ASR model with more pdfs. Performance better than before but not well enough&lt;br /&gt;
* add separate symbol to discriminate kazak and uyghur word set&lt;br /&gt;
* group-based softmax(in progress)&lt;br /&gt;
|| &lt;br /&gt;
* finish group-based softmax and test the performance&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Complete the phonetic-aware speaker segmentation.&lt;br /&gt;
*** Word-level boundaries from the ASR.&lt;br /&gt;
*** Word-level d-vector and clustering.&lt;br /&gt;
||&lt;br /&gt;
* Try some smooth tricks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Organized the code and doc of Parrot system[http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=635]&lt;br /&gt;
||&lt;br /&gt;
* Theoretical study of pronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18</id>
		<title>ASR Status Report 2017-9-18</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18"/>
				<updated>2017-09-18T05:53:10Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
*Test and improve the IOS APP for recording audios.&lt;br /&gt;
*Finish the experiment to test the machine error rate,the result is in my cvss[http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=kangxf&amp;amp;step=view_request&amp;amp;cvssid=629 here] .&lt;br /&gt;
|| &lt;br /&gt;
*Record the audios with zhangmiao using the money from wang.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Implementation of node-pruning.&lt;br /&gt;
* comparison of connection-pruning and node-pruning, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangyanqing&amp;amp;step=view_request&amp;amp;cvssid=634 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on relationship and comparison of connection-pruning and node-pruning.&lt;br /&gt;
* Implementation of long-term dropout and experiments based on it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* group-based softmax finished [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* multi-decoding for group-based softmax (in progress)&lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding for group-based softmax&lt;br /&gt;
* PTN &lt;br /&gt;
* apply Lid for group-based softmax&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615]&lt;br /&gt;
** Make some smooth tricks (Silence limits [MDR] and window-based smooth [FAR]).&lt;br /&gt;
** R.T. test.&lt;br /&gt;
* Music / Noise detection, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=624]&lt;br /&gt;
||&lt;br /&gt;
* Package the code for speaker segmentation.&lt;br /&gt;
* Go on music / noise detection tasks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Part theoretical study of mispronunciation detection.&lt;br /&gt;
* Toolbook writing.&lt;br /&gt;
||&lt;br /&gt;
* Experiments on phonetic LID.&lt;br /&gt;
* Experiments on mispronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Got phonetic feat from a stronger phonetic network&lt;br /&gt;
*Finished part of the experiment using stronger phonetic feature. &lt;br /&gt;
||&lt;br /&gt;
*Will be absent for school.&lt;br /&gt;
*But I will finish the remaining experiment.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website：, save the test recordings, decline the positive samples&lt;br /&gt;
* Recording and cutting the audios, a total of 12 groups&lt;br /&gt;
|| &lt;br /&gt;
* Continue to record the audios with zhangmiao&lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test&lt;br /&gt;
* Record some other people and do the experiments again&lt;br /&gt;
|| &lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
* Recording(the goal is to record 400 to 500 people) [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/cc/录音说明.pdf here]&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding ASR model with more pdfs. Performance better than before but not well enough&lt;br /&gt;
* add separate symbol to discriminate kazak and uyghur word set&lt;br /&gt;
* group-based softmax(in progress)&lt;br /&gt;
|| &lt;br /&gt;
* finish group-based softmax and test the performance&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Complete the phonetic-aware speaker segmentation.&lt;br /&gt;
*** Word-level boundaries from the ASR.&lt;br /&gt;
*** Word-level d-vector and clustering.&lt;br /&gt;
||&lt;br /&gt;
* Try some smooth tricks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Organized the code and doc of Parrot system[http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=635]&lt;br /&gt;
||&lt;br /&gt;
* Theoretical study of pronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18</id>
		<title>ASR Status Report 2017-9-18</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-18"/>
				<updated>2017-09-18T05:50:13Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
*Test and improve the IOS APP for recording audios.&lt;br /&gt;
*Finish the experiment to test the machine error rate,the result is in my cvss.&lt;br /&gt;
|| &lt;br /&gt;
*Record the audios with zhangmiao using the money from wang.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Implementation of node-pruning.&lt;br /&gt;
* comparison of connection-pruning and node-pruning, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangyanqing&amp;amp;step=view_request&amp;amp;cvssid=634 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on relationship and comparison of connection-pruning and node-pruning.&lt;br /&gt;
* Implementation of long-term dropout and experiments based on it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* group-based softmax finished [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* multi-decoding for group-based softmax (in progress)&lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding for group-based softmax&lt;br /&gt;
* PTN &lt;br /&gt;
* apply Lid for group-based softmax&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615]&lt;br /&gt;
** Make some smooth tricks (Silence limits [MDR] and window-based smooth [FAR]).&lt;br /&gt;
** R.T. test.&lt;br /&gt;
* Music / Noise detection, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=624]&lt;br /&gt;
||&lt;br /&gt;
* Package the code for speaker segmentation.&lt;br /&gt;
* Go on music / noise detection tasks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Part theoretical study of mispronunciation detection.&lt;br /&gt;
* Toolbook writing.&lt;br /&gt;
||&lt;br /&gt;
* Experiments on phonetic LID.&lt;br /&gt;
* Experiments on mispronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Got phonetic feat from a stronger phonetic network&lt;br /&gt;
*Finished part of the experiment using stronger phonetic feature. &lt;br /&gt;
||&lt;br /&gt;
*Will be absent for school.&lt;br /&gt;
*But I will finish the remaining experiment.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website：, save the test recordings, decline the positive samples&lt;br /&gt;
* Recording and cutting the audios, a total of 12 groups&lt;br /&gt;
|| &lt;br /&gt;
* Continue to record the audios with zhangmiao&lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test&lt;br /&gt;
* Record some other people and do the experiments again&lt;br /&gt;
|| &lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
* Recording(the goal is to record 400 to 500 people) [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/cc/录音说明.pdf here]&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding ASR model with more pdfs. Performance better than before but not well enough&lt;br /&gt;
* add separate symbol to discriminate kazak and uyghur word set&lt;br /&gt;
* group-based softmax(in progress)&lt;br /&gt;
|| &lt;br /&gt;
* finish group-based softmax and test the performance&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Complete the phonetic-aware speaker segmentation.&lt;br /&gt;
*** Word-level boundaries from the ASR.&lt;br /&gt;
*** Word-level d-vector and clustering.&lt;br /&gt;
||&lt;br /&gt;
* Try some smooth tricks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Organized the code and doc of Parrot system[http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=635]&lt;br /&gt;
||&lt;br /&gt;
* Theoretical study of pronunciation detection&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T03:00:24Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：/* Xiaofei Kang (康晓非) */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:Kangxf_biweekly.pdf|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Kangxf_Data.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Jiayu Guo (郭佳雨) ===&lt;br /&gt;
[[文件:1.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.18 -&lt;br /&gt;
* [[媒体文件:security.jpg|Data security agreement]]&lt;br /&gt;
* Natural Laguage Processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kangxf_biweekly.pdf</id>
		<title>文件:Kangxf biweekly.pdf</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kangxf_biweekly.pdf"/>
				<updated>2017-09-14T02:59:06Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T02:36:45Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：/* Jiayu Guo (郭佳雨) */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Kangxf_Data.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Jiayu Guo (郭佳雨) ===&lt;br /&gt;
[[文件:1.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.18 -&lt;br /&gt;
* [[媒体文件:1.jpg|Data security agreement]]&lt;br /&gt;
* Natural Laguage Processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kangxf_Data.jpg</id>
		<title>文件:Kangxf Data.jpg</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kangxf_Data.jpg"/>
				<updated>2017-09-14T02:34:53Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T02:29:11Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：/* Xiaofei Kang (康晓非) */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Kangxf_Data.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Jiayu Guo (郭佳雨) ===&lt;br /&gt;
[[文件:xxxx.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.18 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T02:26:11Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T02:22:17Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：/* Xiaofei Kang (康晓非) */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg | ]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors</id>
		<title>Cslt-member-visitors</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Cslt-member-visitors"/>
				<updated>2017-09-14T02:20:41Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：/* Xiaofei Kang (康晓非) */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
==Professionals==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Engineers==&lt;br /&gt;
&lt;br /&gt;
=== Yuxin Zhang (张雨心） ===&lt;br /&gt;
[[文件:Zyx.jpg|200px]]&lt;br /&gt;
* Haixia research center&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Finance processing&lt;br /&gt;
* [[媒体文件:Agreement zyx.jpg|Data Security Agreement]]&lt;br /&gt;
&lt;br /&gt;
==Students==&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Jiyuan Zhang (张记袁）===&lt;br /&gt;
[[文件:Zhangjiyuan.png|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2016.4-&lt;br /&gt;
* neural generation model&lt;br /&gt;
* [[媒体文件:An overview of machine translation.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zhangjy_data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Ying Shi (石颖）===&lt;br /&gt;
[[文件:Ying_shi.jpg|200px]]&lt;br /&gt;
* BJTU&lt;br /&gt;
* 2016.6.15-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Shiying_bi_weekly_report.ppt|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement_YingShi.jpg|DataSecurityAgreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Yixiang Chen (陈怿翔)===&lt;br /&gt;
[[文件:Chenyx.jpg|200px]]&lt;br /&gt;
*University of China Mining&lt;br /&gt;
* 2016.7-&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Chenyx_report.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Chenyx data.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
===Shiyue Zhang(张诗悦)===&lt;br /&gt;
[[文件:Zhang Shiyue.jpg|200px]]&lt;br /&gt;
* BUTP&lt;br /&gt;
* 2016.9.06-&lt;br /&gt;
* Language processing&lt;br /&gt;
* [[媒体文件:1.pic hd.jpg| Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=== Yang Wei (魏扬） ===&lt;br /&gt;
[[文件:Weiy_photo.jpg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.10 -&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-monthly report weiy.pdf|Bi-monthly report]]&lt;br /&gt;
* [[媒体文件:Data agreement weiy.jpg|Data security agreement]]&lt;br /&gt;
&lt;br /&gt;
===Yanqing Wang（王延清）===&lt;br /&gt;
[[文件:wyq photo.jpeg|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2016.11-2017.2&lt;br /&gt;
* Speech processing&lt;br /&gt;
* [[媒体文件:Bi-weekly_report.pptx|Bi-weekly report]]&lt;br /&gt;
*[[媒体文件:DataSecurityAgreement wangyanqing.jpg|Data_security_agreement]]&lt;br /&gt;
&lt;br /&gt;
=== Yaodong Wang (王耀东) ===&lt;br /&gt;
[[文件:wangyd.jpg|200px]]&lt;br /&gt;
* CUFE&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:Bi_weekly_report.pptx |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data_security_Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Tongzheng Ren (任桐正) ===&lt;br /&gt;
[[文件:IcCardPicture.do2.jpeg|200px]]&lt;br /&gt;
* THU&lt;br /&gt;
* 2016.12.22 -&lt;br /&gt;
* [[媒体文件:利用LSTM预测时间序列.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Data Security Agreement-Tongzheng Ren.jpg|Data security agreement]]&lt;br /&gt;
* Financial processing&lt;br /&gt;
&lt;br /&gt;
=== Shipan Ren (任师攀) ===&lt;br /&gt;
[[文件:Rsp.jpg|200px]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.05.10 -&lt;br /&gt;
* [[媒体文件:seq2seq.pptx|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Agreement.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Miao Zhang (张淼) ===&lt;br /&gt;
[[文件:miao.JPG|200px]]&lt;br /&gt;
* BUPT&lt;br /&gt;
* 2017.5.1 -&lt;br /&gt;
* [[媒体文件:Zm cough.pdf |Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:Zm.JPG|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;br /&gt;
&lt;br /&gt;
=== Xuejing Zhang (张学敬) ===&lt;br /&gt;
[[文件:Zhangxuejing.jpg|200px]]&lt;br /&gt;
* BISTU&lt;br /&gt;
* 2017.7.7 -&lt;br /&gt;
* [[媒体文件:Zhangxj.jpg|Data security agreement]]&lt;br /&gt;
* Language processing&lt;br /&gt;
&lt;br /&gt;
=== Xiaofei Kang (康晓非) ===&lt;br /&gt;
[[文件:头像.jpg]]&lt;br /&gt;
* PKU&lt;br /&gt;
* 2017.7.17 -&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Bi-weekly report]]&lt;br /&gt;
* [[媒体文件:xxxx.jpg|Data security agreement]]&lt;br /&gt;
* Speech processing&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E5%A4%B4%E5%83%8F.jpg</id>
		<title>文件:头像.jpg</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E5%A4%B4%E5%83%8F.jpg"/>
				<updated>2017-09-14T02:09:43Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-11</id>
		<title>ASR Status Report 2017-9-11</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-11"/>
				<updated>2017-09-11T06:42:41Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Got phonetic feat from a stronger phonetic network&lt;br /&gt;
*Finished part of the experiment using stronger phonetic feature. &lt;br /&gt;
||&lt;br /&gt;
*Will be absent for school.&lt;br /&gt;
*But I will finish the remaining experiment.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website：, save the test recordings, decline the positive samples&lt;br /&gt;
* Recording and cutting the audios, a total of 12 groups&lt;br /&gt;
|| &lt;br /&gt;
* Continue to record the audios with zhangmiao&lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test&lt;br /&gt;
* Record some other people and do the experiments again&lt;br /&gt;
|| &lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
* Recording(the goal is to record 400 to 500 people)&lt;br /&gt;
  [录音说明[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/cc/录音说明.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi-decoding ASR model with more pdfs. Performance better than before but not well enough&lt;br /&gt;
* add sperate symbel to discriminated kazak and uyghur word set&lt;br /&gt;
* group-based softmax(in progress)&lt;br /&gt;
|| &lt;br /&gt;
* finish group-based softmax and test the performance&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Complete the phonetic-aware speaker segmentation.&lt;br /&gt;
*** Word-level boundaries from the ASR.&lt;br /&gt;
*** Word-level d-vector and clustering.&lt;br /&gt;
||&lt;br /&gt;
* Try some smooth tricks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Finished the phonetic i-vector experiment.&lt;br /&gt;
||&lt;br /&gt;
*get BN feature and train i-vector LID.&lt;br /&gt;
*Get phonetic feat from a stronger phonetic network&lt;br /&gt;
*combine PTN and phonetic i-vector.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* cutting audio and marking：21 speakers，a total of 1050 sentences&lt;br /&gt;
* Finish the new speaker recognition using the two recordings.&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test on 21-style speech(add the disguise)&lt;br /&gt;
* Draw spectrums and t-SNE plots compared with experiment results&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi decodeing ASR model&lt;br /&gt;
* multi decodeing with fake Lid [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* read code about TTS&lt;br /&gt;
|| &lt;br /&gt;
* employ group softmax to train multi decoding ASR model&lt;br /&gt;
* synthesis one 'real' speech&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Dimensionality reduction.&lt;br /&gt;
** Clustering.&lt;br /&gt;
** Visualization.&lt;br /&gt;
||&lt;br /&gt;
* Phonetic-aware speaker segmentation.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* more indicators for VV scoring system, see [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a1/VV_scoring.pdf].&lt;br /&gt;
||&lt;br /&gt;
* more indicators, a demo with Shuai.&lt;br /&gt;
* toolbook writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-11</id>
		<title>ASR Status Report 2017-9-11</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-11"/>
				<updated>2017-09-11T05:22:02Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website：, save the test recordings, decline the positive samples&lt;br /&gt;
* Recording and cutting the audios, a total of 12 groups&lt;br /&gt;
|| &lt;br /&gt;
* Recording the 440 groups audios left with zhangmiao&lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test&lt;br /&gt;
* Record some other people and do the experiments again&lt;br /&gt;
|| &lt;br /&gt;
* Continue to ask people to do human test&lt;br /&gt;
* Recording(the goal is to record 400 to 500 people)&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
----------------------------------------------&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Finished the phonetic i-vector experiment.&lt;br /&gt;
||&lt;br /&gt;
*get BN feature and train i-vector LID.&lt;br /&gt;
*Get phonetic feat from a stronger phonetic network&lt;br /&gt;
*combine PTN and phonetic i-vector.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* cutting audio and marking: 21 speakers, a total of 1050 sentences&lt;br /&gt;
* Finish the new speaker recognition using the two recordings.&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test on 21-style speech(add the disguise)&lt;br /&gt;
* Draw spectrums and t-SNE plots compared with experiment results&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi decoding ASR model&lt;br /&gt;
* multi decoding with fake Lid [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* read code about TTS&lt;br /&gt;
|| &lt;br /&gt;
* employ group softmax to train multi decoding ASR model&lt;br /&gt;
* synthesis one 'real' speech&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Go on speaker segmentation tasks, see [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=615 here]&lt;br /&gt;
** Dimensionality reduction.&lt;br /&gt;
** Clustering.&lt;br /&gt;
** Visualization.&lt;br /&gt;
||&lt;br /&gt;
* Phonetic-aware speaker segmentation.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* more indicators for VV scoring system, see [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a1/VV_scoring.pdf].&lt;br /&gt;
||&lt;br /&gt;
* more indicators, a demo with Shuai.&lt;br /&gt;
* toolbook writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-4</id>
		<title>ASR Status Report 2017-9-4</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-9-4"/>
				<updated>2017-09-04T02:56:10Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;9&amp;quot;|2017.9.4&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|Jiayin Cai&lt;br /&gt;
||&lt;br /&gt;
*Finished the phonetic i-vector experiment.&lt;br /&gt;
||&lt;br /&gt;
*get BN feature and train i-vector LID.&lt;br /&gt;
*Get phonetic feat from a stronger phonetic network&lt;br /&gt;
*combine PTN and phonetic i-vector.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* cutting audio and marking: 21 speakers, a total of 1050 sentences&lt;br /&gt;
* Finish the new speaker recognition using the two recordings.&lt;br /&gt;
|| &lt;br /&gt;
* improve the human Test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Absent&lt;br /&gt;
|| &lt;br /&gt;
* Perform human test on 21-style speech(add the disguise)&lt;br /&gt;
* Draw spectrums and t-SNE plots compared with experiment results&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* multi decoding ASR model&lt;br /&gt;
* multi decoding with fake Lid [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=627 here]&lt;br /&gt;
* read code about TTS&lt;br /&gt;
|| &lt;br /&gt;
* employ group softmax to train multi decoding ASR model&lt;br /&gt;
* synthesis one 'real' speech&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Absent.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* more indicators for VV scoring system, see [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a1/VV_scoring.pdf].&lt;br /&gt;
||&lt;br /&gt;
* more indicators, a demo with Shuai.&lt;br /&gt;
* toolbook writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.21&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Recording new audios from 38 people, located in /work7/tanghui/kangxf/workspaces/speaker/wavdata/V2.0 &lt;br /&gt;
* Improve the test website to judge before committing&lt;br /&gt;
|| &lt;br /&gt;
* Test the new recording.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* pruning the connections and refining, [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=626 results]&lt;br /&gt;
||&lt;br /&gt;
* Absent. &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* check toolkit code&lt;br /&gt;
* multilingual baseline system&lt;br /&gt;
|| &lt;br /&gt;
* train language id model&lt;br /&gt;
* use Lid to do multi-decoding&lt;br /&gt;
* some experiments for zhiyong zhang about TTS&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Attend IS2017.&lt;br /&gt;
||&lt;br /&gt;
* Go on speaker segmentation tasks.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* several indicators for VV scoring system, see [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a1/VV_scoring.pdf].&lt;br /&gt;
||&lt;br /&gt;
* more indicators, a demo with Shuai.&lt;br /&gt;
* toolbook writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Asr-progress_2017.08</id>
		<title>Asr-progress 2017.08</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Asr-progress_2017.08"/>
				<updated>2017-09-01T03:06:57Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
===Daily Report===&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Person  !! start!! leave !! hours ||status (problems/solutions)&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.1&lt;br /&gt;
|Yanqing Wang||  11:00  || 19:30   ||  8.5h    ||  start to write TRP ( connection sparseness )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang || 10:00 || 23:00 || 13h ||   discuss the recording plan and decide a preliminary plan.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.2&lt;br /&gt;
|Yanqing Wang ||  11:00  || 19:00   ||  8h    ||  write TRP ( connection sparseness )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||    Complete a part of the recording work: collecting six types of sound from 13 people.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.3&lt;br /&gt;
|Yanqing Wang ||  11:00  || 17:00   ||  6h    ||  write TRP ( connection sparseness )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||  read related paper and do a speaker recognition experiment.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.4&lt;br /&gt;
|Yanqing Wang ||   11:00  || 19:00   ||  8h    ||  write TRP ( connection sparseness )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||    Finish experiments of 12-style speech with ZhangMiao, a total of 5 experiments.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.5&lt;br /&gt;
|Yanqing Wang ||   12:30  || 19:00   ||  6.5h    ||  write TRP ( connection sparseness )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.6&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.7&lt;br /&gt;
|Yanqing Wang ||    11:00  || 19:00   ||  8h    ||  write a shell script to prune a new network according to a pruned network&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||   Recording the  audio for the speaker recognition.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.8&lt;br /&gt;
|Yanqing Wang || 11:00  || 19:00   ||  8h    ||  use the yesterday's shell script but find that the proficiency is too low&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||    continue to record the audio.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.9&lt;br /&gt;
|Yanqing Wang ||    11:00  || 19:00   ||  8h    ||  learn to write a kaldi-command &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||   read the paper about i-vector and d-vector.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.10&lt;br /&gt;
|Yanqing Wang ||    11:00  || 19:00   ||  8h    ||  write a kaldi-command to  prune a new network according to a pruned network&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||  Learn the new test website from zhangmiao &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.11&lt;br /&gt;
|Yanqing Wang ||   11:00  || 19:00   ||  8h    ||  use yesterday's command to prune a new network&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00 || 23:00 || 13h ||  Learn PHP, and test the new test website.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.12&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.13&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.14&lt;br /&gt;
|Yanqing Wang ||    12:00  || 18:00   ||  6h    ||  exp: 97% pct prune and a contrast exp: apply its structure to a new network&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.15&lt;br /&gt;
|Yanqing Wang ||    12:00  || 18:00   ||  6h    ||  exp: 97% pct prune and a contrast exp: apply its structure to a new network&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.16&lt;br /&gt;
|Yanqing Wang ||    12:00  || 20:00   ||  8h    ||  summarize the 2 exp I did yesterday&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.17&lt;br /&gt;
|Yanqing Wang ||    12:00  || 20:00   ||  8h    ||  explore the distribution of the nonlin6's output&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.18&lt;br /&gt;
|Yanqing Wang ||   12:00  || 20:00   ||  8h    ||  explore the distribution of the nonlin6's output&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.19&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.20&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.21&lt;br /&gt;
|Yanqing Wang ||    12:00  || 20:00   ||  8h    ||  do exps: prune a just-randomly-initiated network randomly, and train it ( groups 1 )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:30 || 23:00 || 12.5h  || Improve the test website to judge before committing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.22&lt;br /&gt;
|Yanqing Wang ||    11:30  || 18:00   ||  6.5h    ||  do exps: prune a just-randomly-initiated network randomly, and train it ( groups 2-3 )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 9:30 || 21:30 || 12h  ||  Recording new audios for the speaker recognition.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.23&lt;br /&gt;
|Yanqing Wang ||   11:30  || 18:00   ||  6.5h    ||  do exps: prune a just-randomly-initiated network randomly, and train it ( groups 4-5 )&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang|| 9:30 || 21:30 || 12h  ||  continue to record new audios, a total of 38 people.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.24&lt;br /&gt;
|Yanqing Wang ||   11:30  || 18:00   ||  6.5h    || write awk , shell scripts and kaldi-command as a preparation for node-sparseness task.&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 9:30 || 21:30 || 12h  ||  Organize the voice and do a sample test, and learn 4 papers from lantian&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.25&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 9:30 || 21:30 || 12h  ||  do a speaker recognition with the new audios.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.26&lt;br /&gt;
|Yanqing Wang ||  8:30  || 15:00   ||  6.5h    ||  survey on node-sparseness, summarize the exps of this week and ask for short-time leave&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.27&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||   ||   ||    || &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.28&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 9:30 || 21:30 || 12h  ||  Arranged a new voice dataset, and found the second recordings for all 21 people.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.29&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 9:30 || 21:30 || 12h  ||   cutting the audio of 21 speakers, a total of 1050 sentences.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.30&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:30 || 23:30  ||  13h  || mark the segments of 1050 audios. &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.8.31&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:30 || 23:30  ||  13h  ||   finish the speaker recognition experiment, and get a test result.&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
===Time Off Table===&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Name       !! Days off&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang|| &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen||  &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang||  &lt;br /&gt;
|-&lt;br /&gt;
|Xiaofei Kang ||&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-28</id>
		<title>ASR Status Report 2017-8-28</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-28"/>
				<updated>2017-08-28T06:04:02Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.21&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Recording new audios from 38 people, located in /work7/tanghui/kangxf/workspaces/speaker/wavdata/V2.0 &lt;br /&gt;
* Improve the test website to judge before committing&lt;br /&gt;
|| &lt;br /&gt;
* Test the new recording.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* pruning the connections and refining, [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=tangzy&amp;amp;step=view_request&amp;amp;cvssid=626 results]&lt;br /&gt;
||&lt;br /&gt;
* Absent. &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* check toolkit code&lt;br /&gt;
* multilingual baseline system&lt;br /&gt;
|| &lt;br /&gt;
* train language id model&lt;br /&gt;
* use Lid to do multi-decoding&lt;br /&gt;
* some experiments for zhiyong zhang about TTS&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* several indicators for VV scoring system, see [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a1/VV_scoring.pdf].&lt;br /&gt;
||&lt;br /&gt;
* more indicators, a demo with Shuai.&lt;br /&gt;
* toolbook writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.21&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Absence &lt;br /&gt;
|| &lt;br /&gt;
* finish experiments on 5 recorded speech.&lt;br /&gt;
* Improve and test the human test website. &lt;br /&gt;
* Learn 4 papers from lantian : about speaker recognition and deep speaker feature.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Prepare the data and finish experiments on 5 recorded speech.&lt;br /&gt;
* Finish the human test website (including 20 styles), express my appreciation to Shuai sister!&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* explore how the pruning method influence the ( distribution of ) output of the network: [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/d/d4/Abs_pos_neg.pdf result]&lt;br /&gt;
* after retraining, the distribution may [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/88/1_RE.pdf reappear].&lt;br /&gt;
||&lt;br /&gt;
* continue on exploration on the network's sparse structure.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* crawler program [finished]&lt;br /&gt;
* tibetan asr system baseline (19.46%)&lt;br /&gt;
|| &lt;br /&gt;
* multilingual decoding &lt;br /&gt;
* maybe I can help Zhiyong Zhang to do some work about TTS&lt;br /&gt;
* check toolkit code(check data website and codemap) and check it into git&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* 1. align the candidate speech (fbank) with phone labels using nnet3-align-compiled (almost finished); 2.analyse the alignment with rhythm, tone, tune, for Parrot system, (revised goodness of pronunciation), to be done.&lt;br /&gt;
* collecting material (PPT) for Kaldi toolbook.&lt;br /&gt;
||&lt;br /&gt;
* analyse the alignment with rhythm, tone, tune, (revised goodness of pronunciation). &lt;br /&gt;
* toolbook writing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Biweekly_report%E2%80%94Speech_Conversion.pptx</id>
		<title>文件:Biweekly report—Speech Conversion.pptx</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Biweekly_report%E2%80%94Speech_Conversion.pptx"/>
				<updated>2017-08-24T05:41:30Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：双周答辩PPT（2）&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;双周答辩PPT（2）&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E6%95%B0%E6%8D%AE%E4%BF%9D%E5%AF%86%E5%8D%8F%E8%AE%AE_%E5%BA%B7%E6%99%93%E9%9D%9E_CSLT.jpg</id>
		<title>文件:数据保密协议 康晓非 CSLT.jpg</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E6%95%B0%E6%8D%AE%E4%BF%9D%E5%AF%86%E5%8D%8F%E8%AE%AE_%E5%BA%B7%E6%99%93%E9%9D%9E_CSLT.jpg"/>
				<updated>2017-08-24T05:39:39Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：实验室数据保密协议.&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;实验室数据保密协议.&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/Asr-progress_2017.07</id>
		<title>Asr-progress 2017.07</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/Asr-progress_2017.07"/>
				<updated>2017-08-24T05:27:56Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;===Daily Report===&lt;br /&gt;
&lt;br /&gt;
{|class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Person  !! start!! leave !! hours ||status&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.1&lt;br /&gt;
|Yanqing Wang ||  11:00  || 20:00   ||   9h   ||  continue on experiments on 4 types of activation function&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  do a meeting report on trivial events&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||   learn the Speech Recognition lesson from University of Edinburgh.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.2&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||  9h    ||   continue on experiments on 4 types of activation function&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||    11h   ||  design a human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  plot a spectrum of a waveform with Matlab and Python.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.3&lt;br /&gt;
|Yanqing Wang || 11:00   || 20:00   ||  9h    ||  change the dimension to 1000 and retry the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  design a human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.4&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  design a human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  prepare my biweekly report.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.5&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  design a human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || continue to prepare my biweekly report.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.6&lt;br /&gt;
|Yanqing Wang || 11:00   || 20:00   ||  9h    ||  change the dimension to 1000 and retry the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  design a human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || continue to prepare my biweekly report.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.7&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   ||  continue former exps, read source code of Kaldi&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  finish the human test website&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || continue to prepare my biweekly report.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.8&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   ||  continue former exps, read source code of Kaldi&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||  ||     ||     || &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.9&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   ||  continue former exps, read source code of Kaldi&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||   ||     ||    || &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.10&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  Read the paper of Paralinguistics&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  prepare a PPT for biweekly report&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.11&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   ||  start to change the source code of Kaldi in order to implement retraining the nnet&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  Read the paper of Paralinguistics&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00|| 21:00 ||  11h   ||   do a biweekly report about STFT Spectrogram&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.12&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||   Read the paper of Paralinguistics&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00|| 21:00 ||  11h   ||   learn the srfft transform from kaldi.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.13&lt;br /&gt;
|Yanqing Wang ||  14:00  || 20:00   ||  6h    ||   start to change the source code of Kaldi in order to implement retraining the nnet&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  Make a plan for recording&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang || 10:00|| 21:00 ||  11h   ||  pick out the relationship between kaldi and python about spectrum transform.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.14&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  Read material from Teacher Li&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || learn the nnet3 theory from tanghui&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.15&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   ||  change the source code of Kaldi in order to implement retraining the nnet&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || do a speech conversion experiment with Python.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.16&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||   prepare a PPT for biweekly supplementary report&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.17&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || change the source code of Kaldi&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  check the book of deep learning&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||   do a biweekly supplementary report about speech conversion.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.18&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || change the source code of Kaldi&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  check the book of deep learning&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  learn the papers about speaker recognition&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.19&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || after changing source code, compile Kaldi and redo the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||  11h    ||  work out the recording plan with instruction from Teacher Li&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||   prepare the data set of Speaker Recognition : pick out whisper &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.20&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || redo the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  learnt kaldi and did experiments&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||    Learn the nnet3 model&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.21&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || redo the former exps and test the conclusions&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  joined a meeting in Chinese Academy of Social Sciences&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  run the nnet3 experiment &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.22&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.23&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.24&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || debug and find the wrong place of exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  test performances on 12-style database&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  read papers about speaker recognition&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.25&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || redo the former exps and test the conclusions&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  test performances on 12-style database&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || write a script to cut the vad of the wavefiles&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.26&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || redo the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  test performances on 12-style database&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || write a script to convert 8bit to 16bit.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.27&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || redo the former exps&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||  11h    ||  test performances on 12-style database&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  ||  Finish the Speaker Recognition experiment：mouth with candy&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.28&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || conclude the exps and start to write a report&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||   11h   ||  test performances on 12-style database&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || Finish the Speaker Recognition experiment：normal chat &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.29&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.30&lt;br /&gt;
|Yanqing Wang ||    ||    ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||    ||     ||      ||  &lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||    ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;4&amp;quot;|2017.7.31&lt;br /&gt;
|Yanqing Wang ||  11:00  ||  20:00  ||   9h   || learn to use LaTeX&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen ||     ||    ||      ||   &lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang   ||  9:00  ||  20:00   ||      ||  optimize the vad parameter to improve the performance&lt;br /&gt;
|-&lt;br /&gt;
| Xiaofei Kang ||10:00 ||  21:00  ||  11h  || read papers about i-vector and d-vector&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-21</id>
		<title>ASR Status Report 2017-8-21</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-21"/>
				<updated>2017-08-21T05:02:14Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.21&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Absence &lt;br /&gt;
|| &lt;br /&gt;
* finish experiments on 5 recorded speech.&lt;br /&gt;
* Improve and test the human test website. &lt;br /&gt;
* Learn 4 papers from lantian : about speaker recognition and deep speaker feature.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Prepare the data and finish experiments on 5 recorded speech.&lt;br /&gt;
* Finish the human test website (includes 20 styles), express my appreciation to Shuai sister!&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* 1. align the candidate speech (fbank) with phone labels using nnet3-align-compiled (almost finished); 2.analyse the alignment with rhythm, tone, tune, for Parrot system, (revised goodness of pronunciation), to be done.&lt;br /&gt;
* collecting material (PPT) for Kaldi toolbook.&lt;br /&gt;
||&lt;br /&gt;
* analyse the alignment with rhythm, tone, tune, (revised goodness of pronunciation). &lt;br /&gt;
* toolbook writing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.14&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Recording 35 people audio, located in /work7/zhangmiao/speaker/wavdata/data_new&lt;br /&gt;
* Learn the new test website from zhangmiao&lt;br /&gt;
|| &lt;br /&gt;
* Go home with my mom, and come back on Friday night.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Recording work&lt;br /&gt;
* Test website's data preparation&lt;br /&gt;
* check the linear chapter&lt;br /&gt;
|| &lt;br /&gt;
* Continue to record&lt;br /&gt;
* do experiments on recorded speech if possible&lt;br /&gt;
* check the NN chapter&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/50/Connection_Sparseness.pdf TRP] uploaded.&lt;br /&gt;
* explore the importance of sparseness structure:&lt;br /&gt;
** After pruning, initialize non-zero values randomly, train.&lt;br /&gt;
** train nnet with 177-dimension hidden layer.&lt;br /&gt;
** [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangyanqing&amp;amp;step=view_request&amp;amp;cvssid=609 result]&lt;br /&gt;
||&lt;br /&gt;
* continue exploring the values of trained nnet.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* general codeMap finished(kazak)&lt;br /&gt;
* crawler program delayed (most of the Kazakh websites are down; I will crawl data from overseas websites)&lt;br /&gt;
|| &lt;br /&gt;
* collect more Unicode. such as Tibetan, Mongolia.&lt;br /&gt;
* crawler kazak data from overseas websites.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* Study English and help Lantian do some Exps.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* Visualization and quantification for d-vector [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e2/Spk_seg.pdf].&lt;br /&gt;
** phone-aware and phone-blind.&lt;br /&gt;
** within speaker variation and between speaker variation. &lt;br /&gt;
* Speaker segmentation Exps.&lt;br /&gt;
||&lt;br /&gt;
* Finish speaker segmentation Exp.&lt;br /&gt;
* Prepare IS17 presentation.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* reorganize auto-scoring system, next ???&lt;br /&gt;
* collecting material (PPT) for Kaldi toolbook.&lt;br /&gt;
||&lt;br /&gt;
* prefer to rewrite the scoring part.&lt;br /&gt;
* toolbook writing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-7</id>
		<title>ASR Status Report 2017-8-7</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-7"/>
				<updated>2017-08-07T05:43:37Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.7&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish experiments of 12-style speech with ZhangMiao. (Results are shown in ZhangMiao's CVSS)&lt;br /&gt;
* Complete a part of the recording work: collecting six types of sound from 13 people.&lt;br /&gt;
|| &lt;br /&gt;
* Finish the recording work left with ZhangMiao&lt;br /&gt;
* Build a new test website with ZhangMiao&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Finish experiments of 12-style speech with Xiaofei. (Results are shown in CVSS)&lt;br /&gt;
* Build a new test website &lt;br /&gt;
|| &lt;br /&gt;
* Recording work&lt;br /&gt;
* Improve the website by decreasing salience segments and replenish other styles&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr [finished]&lt;br /&gt;
* design crawler program&lt;br /&gt;
|| &lt;br /&gt;
* finish the crawler program&lt;br /&gt;
* CodeMap for Tibetan&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Some functions of the auto-scoring system rewritten.&lt;br /&gt;
|| &lt;br /&gt;
* An app demo with Shuai Zhang. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.31&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish the Speaker Recognition experiment：mouth with candy, normal chat&lt;br /&gt;
|| &lt;br /&gt;
* Understand all the scripts of the Speaker Recognition experiment, and then learn to modify it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* finish the experiments on five kinds of speech&lt;br /&gt;
|| &lt;br /&gt;
* optimize the vad parameter to improve the performance&lt;br /&gt;
* finish the new human test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* retraining task: experiments are in progress, some time needed.&lt;br /&gt;
||&lt;br /&gt;
* all experiments should be done.&lt;br /&gt;
* TRP of retraining task.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* apply mongodb and ajax on the data checking website &lt;br /&gt;
** with mongodb we no longer depend on file locks&lt;br /&gt;
** there is no need to save web state (except some cookies) after employing ajax&lt;br /&gt;
* continue to learn crawler&lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr (use sheep02)&lt;br /&gt;
* design crawler program&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* plot tsne picture for 863 &amp;amp; fisher-5000 data set&lt;br /&gt;
* find why the performance of whisper is better than that of chat&lt;br /&gt;
|| &lt;br /&gt;
* check data and paper&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* T-sne plot for speaker segmentation preparation [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e2/Spk_seg.pdf].&lt;br /&gt;
* check TASLP and NIPS paper.&lt;br /&gt;
|| &lt;br /&gt;
* deep spk recipe.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Updated the auto-scoring system with the newest version of Kaldi. Several patches need to be repaired. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* Initial version of auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-7</id>
		<title>ASR Status Report 2017-8-7</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-8-7"/>
				<updated>2017-08-07T05:42:12Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.8.7&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish experiments of 12-style speech with ZhangMiao. (Results are shown in ZhangMiao's CVSS)&lt;br /&gt;
* Complete part of the recording work: collection of six types of sound from 13 people.&lt;br /&gt;
|| &lt;br /&gt;
* Finish the recording work left with ZhangMiao&lt;br /&gt;
* Build a new test website with ZhangMiao&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* Finish experiments of 12-style speech with Xiaofei. (Results are shown in CVSS)&lt;br /&gt;
* Build a new test website &lt;br /&gt;
|| &lt;br /&gt;
* Recording work&lt;br /&gt;
* Improve the website by decreasing salience segments and replenish other styles&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr [finished]&lt;br /&gt;
* design crawler program&lt;br /&gt;
|| &lt;br /&gt;
* finish the crawler program&lt;br /&gt;
* CodeMap for Tibetan&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Some functions of the auto-scoring system rewritten.&lt;br /&gt;
|| &lt;br /&gt;
* An app demo with Shuai Zhang. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.31&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish the Speaker Recognition experiment：mouth with candy, normal chat&lt;br /&gt;
|| &lt;br /&gt;
* Understand all the scripts of the Speaker Recognition experiment, and then learn to modify it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* finish the experiments on five kinds of speech&lt;br /&gt;
|| &lt;br /&gt;
* optimize the vad parameter to improve the performance&lt;br /&gt;
* finish the new human test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* retraining task: experiments are in progress, some time needed.&lt;br /&gt;
||&lt;br /&gt;
* all experiments should be done.&lt;br /&gt;
* TRP of retraining task.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* apply mongodb and ajax on the data checking website &lt;br /&gt;
** with mongodb we no longer depend on file locks&lt;br /&gt;
** there is no need to save web state (except some cookies) after employing ajax&lt;br /&gt;
* continue to learn crawler&lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr (use sheep02)&lt;br /&gt;
* design crawler program&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* plot tsne picture for 863 &amp;amp; fisher-5000 data set&lt;br /&gt;
* find why the performance of whisper is better than that of chat&lt;br /&gt;
|| &lt;br /&gt;
* check data and paper&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* T-sne plot for speaker segmentation preparation [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e2/Spk_seg.pdf].&lt;br /&gt;
* check TASLP and NIPS paper.&lt;br /&gt;
|| &lt;br /&gt;
* deep spk recipe.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Updated the auto-scoring system with the newest version of Kaldi. Several patches need to be repaired. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* Initial version of auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-7-31</id>
		<title>ASR Status Report 2017-7-31</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-7-31"/>
				<updated>2017-07-31T04:31:02Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.31&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish the Speaker Recognition experiment：mouth with candy, normal chat&lt;br /&gt;
|| &lt;br /&gt;
* Understand all the scripts of the Speaker Recognition experiment, and then learn to modify it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* finish the experiments on five kinds of speech&lt;br /&gt;
|| &lt;br /&gt;
* optimize the vad parameter to improve the performance&lt;br /&gt;
* finish the new human test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* retraining task: experiments are in progress, some time needed.&lt;br /&gt;
||&lt;br /&gt;
* all experiments should be done.&lt;br /&gt;
* TRP of retraining task.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* apply mongodb and ajax on the data checking website &lt;br /&gt;
** with mongodb we no longer depend on file locks&lt;br /&gt;
** there is no need to save web state (except some cookies) after employing ajax&lt;br /&gt;
* continue to learn crawler&lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr (use sheep02)&lt;br /&gt;
* design crawler program&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* check paper&lt;br /&gt;
* plot tsne picture for 863 &amp;amp; fisher-5000 data set&lt;br /&gt;
* find why the performance of whisper is better than that of chat&lt;br /&gt;
|| &lt;br /&gt;
* check data and paper&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Updated the auto-scoring system with the newest version of Kaldi. Several patches need to be repaired. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* Initial version of auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.24&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Prepare the data set of Speaker Recognition : pick out whisper&lt;br /&gt;
* Learn the nnet3 model, run the nnet3 experiment &lt;br /&gt;
|| &lt;br /&gt;
* Learn the Speaker Recognition model, run the Speaker Recognition experiment&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* joined a meeting in Chinese Academy of Social Sciences&lt;br /&gt;
* worked out a recording plan&lt;br /&gt;
* learnt kaldi and did experiments&lt;br /&gt;
|| &lt;br /&gt;
* test performances on 12 kinds of voices we have&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Hui Tang &lt;br /&gt;
|| &lt;br /&gt;
* help jiayin to configure dnn and lstm in kaldi&lt;br /&gt;
|| &lt;br /&gt;
* left for postgraduate life&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* change the source code of Kaldi to implement retraining ( with zero value fixed )&lt;br /&gt;
* start to write a technical report of pruning the neural network ( not finished ) &lt;br /&gt;
||&lt;br /&gt;
* finish the retraining task&lt;br /&gt;
* finish the technical report&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* data checking website&lt;br /&gt;
* learn how to write a crawler program&lt;br /&gt;
|| &lt;br /&gt;
* write a more general crawler&lt;br /&gt;
* realign kazak train and test data with transfer learning model &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* use whisper audio for speaker recognition&lt;br /&gt;
* joined a meeting in Chinese Academy of Social Sciences&lt;br /&gt;
|| &lt;br /&gt;
* test performances on 12 kinds of voices&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* deepspk on TASLP.&lt;br /&gt;
* speaker segmentation.&lt;br /&gt;
|| &lt;br /&gt;
* recipe of deepspk.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Replaced ATLAS lib with MKL lib for compiling auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* A basic demo for auto-scoring system. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	<entry>
		<id>http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-7-31</id>
		<title>ASR Status Report 2017-7-31</title>
		<link rel="alternate" type="text/html" href="http://www.cslt.org/mediawiki/index.php/ASR_Status_Report_2017-7-31"/>
				<updated>2017-07-31T04:30:36Z</updated>
		
		<summary type="html">&lt;p&gt;Kangxf：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.31&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Finish the Speaker Recognition experiment：mouth with candy, normal chat&lt;br /&gt;
|| &lt;br /&gt;
* Understand all the scripts of the Speaker Recognition experiment, and then learn to modify it.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* finish the experiments on five kinds of speech&lt;br /&gt;
|| &lt;br /&gt;
* optimize the vad parameter to improve the performance&lt;br /&gt;
* finish the new human test website&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* retraining task: experiments are in progress, some time needed.&lt;br /&gt;
||&lt;br /&gt;
* all experiments should be done.&lt;br /&gt;
* TRP of retraining task.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* apply mongodb and ajax on the data checking website &lt;br /&gt;
** with mongodb we no longer depend on the file lock&lt;br /&gt;
** there is no need to save web state (except some cookies) after employing ajax&lt;br /&gt;
* continue to learn crawler&lt;br /&gt;
|| &lt;br /&gt;
* setup server for m2asr (use sheep02)&lt;br /&gt;
* design crawler program&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* check paper&lt;br /&gt;
* plot tsne picture for 863 &amp;amp; fisher-5000 data set&lt;br /&gt;
* find why the performance of whisper is better than that of chat&lt;br /&gt;
|| &lt;br /&gt;
* check data and paper&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Updated the auto-scoring system with the newest version of Kaldi. Several patches need to be repaired. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* Initial version of auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
------------------------&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!Date!!People !! Last Week !! This Week&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;8&amp;quot;|2017.7.24&lt;br /&gt;
&lt;br /&gt;
|Xiaofei Kang&lt;br /&gt;
|| &lt;br /&gt;
* Prepare the data set of Speaker Recognition : pick out whisper&lt;br /&gt;
* Learn the nnet3 model, run the nnet3 experiment &lt;br /&gt;
|| &lt;br /&gt;
* Learn the Speaker Recognition model, run the Speaker Recognition experiment&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Miao Zhang&lt;br /&gt;
|| &lt;br /&gt;
* joined a meeting in Chinese Academy of Social Sciences&lt;br /&gt;
* worked out a recording plan&lt;br /&gt;
* learnt kaldi and did experiments&lt;br /&gt;
|| &lt;br /&gt;
* test performances on 12 kinds of voices we have&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Hui Tang &lt;br /&gt;
|| &lt;br /&gt;
* help jiayin to configure dnn and lstm in kaldi&lt;br /&gt;
|| &lt;br /&gt;
* left for postgraduate life&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yanqing Wang&lt;br /&gt;
|| &lt;br /&gt;
* change the source code of Kaldi to implement retraining ( with zero value fixed )&lt;br /&gt;
* start to write a technical report of pruning the neural network ( not finished ) &lt;br /&gt;
||&lt;br /&gt;
* finish the retraining task&lt;br /&gt;
* finish the technical report&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi  &lt;br /&gt;
|| &lt;br /&gt;
* data checking website&lt;br /&gt;
* learn how to write a crawler program&lt;br /&gt;
|| &lt;br /&gt;
* write a more general crawler&lt;br /&gt;
* realign kazak train and test data with transfer learning model &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yixiang Chen  &lt;br /&gt;
|| &lt;br /&gt;
* use whisper audio for speaker recognition&lt;br /&gt;
* joined a meeting in Chinese Academy of Social Sciences&lt;br /&gt;
|| &lt;br /&gt;
* test performances on 12 kinds of voices&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li  &lt;br /&gt;
|| &lt;br /&gt;
* deepspk on TASLP.&lt;br /&gt;
* speaker segmentation.&lt;br /&gt;
|| &lt;br /&gt;
* recipe of deepspk.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhiyuan Tang &lt;br /&gt;
|| &lt;br /&gt;
* Replaced ATLAS lib with MKL lib for compiling auto-scoring system.&lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|| &lt;br /&gt;
* A basic demo for auto-scoring system. &lt;br /&gt;
* Kaldi book writing.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Kangxf</name></author>	</entry>

	</feed>