67 | 67 | </tr>
68 | 68 | </table>
69 | 69 |
| 70 | + <table width="880" border="0" align="center" cellspacing="0" cellpadding="0"> |
| 71 | + <tr> |
| 72 | + <td style="width:35%; vertical-align:middle; padding-right: 20px;"> |
| 73 | + <div class="image-container"> |
| 74 | + <img src='publications/2024_AnyCar.gif' width="85%"> |
| 75 | + </div> |
| 76 | + </td> |
| 77 | + <td style="width:65%; vertical-align:middle"> |
| 78 | + <papertitle>AnyCar to Anywhere: Learning Universal Dynamics Model for Agile and Adaptive Mobility</papertitle> |
| 79 | + <br> |
| 80 | + Wenli Xiao<sup>*</sup>, Haoru Xue<sup>*</sup>, Tony Tao, Dvij Kalaria, John M. Dolan, Guanya Shi |
| 81 | + <br> |
| 82 | + <a href="https://arxiv.org/abs/2409.15783" target="_blank"><i class="far fa-file"></i> paper</a>   |
| 83 | + <a href="https://lecar-lab.github.io/anycar/" target="_blank"><i class="fas fa-globe"></i> website</a>   |
| 84 | + <a href="https://github.com/LeCAR-Lab/lecar-car" target="_blank"><i class="fas fa-code"></i> code</a> |
| 85 | + <p style="margin-top: 5px"><i class="fas fa-comment-dots"></i> TL;DR: AnyCar is a transformer-based dynamics model that can adapt to various vehicles, environments, state estimators, and tasks. |
| 86 | + </p> |
| 87 | + </td> |
| 88 | + </tr> |
| 89 | + </table> |
| 90 | + |
| 91 | + <br> |
| 92 | + |
70 | 93 | <table width="880" border="0" align="center" cellspacing="0" cellpadding="0">
71 | 94 | <tr>
72 | 95 | <td style="width:35%; vertical-align:middle; padding-right: 20px;">
111 | 134 | </tr>
112 | 135 | </table>
113 | 136 |
114 | | - <br> |
| 137 | + <br> |
| 138 | + <br> |
| 139 | + |
| 140 | + <table width="880" border="0" align="center" cellspacing="0" cellpadding="0"> |
| 141 | + <tr> |
| 142 | + <td width="100%" valign="middle"> |
| 143 | + <heading>2024</heading> |
| 144 | + </td> |
| 145 | + </tr> |
| 146 | + </table> |
115 | 147 |
116 | 148 | <table width="880" border="0" align="center" cellspacing="0" cellpadding="0">
117 | 149 | <tr>
125 | 157 | <br>
126 | 158 | Xiaofeng Guo<sup>*</sup>, Guanqi He<sup>*</sup>, Jiahe Xu, Mohammadreza Mousaei, Junyi Geng, Sebastian Scherer, Guanya Shi
127 | 159 | <br>
| 160 | + <em>IEEE Robotics and Automation Letters (RA-L)</em>, 2024 |
| 161 | + <br> |
128 | 162 | <a href="https://arxiv.org/abs/2407.05587" target="_blank"><i class="far fa-file"></i> paper</a>  
129 | 163 | <a href="https://xiaofeng-guo.github.io/flying-calligrapher/" target="_blank"><i class="fas fa-globe"></i> website</a>  
130 | 164 | <a href="https://spectrum.ieee.org/video-friday-unitree-talks-robots" target="_blank"><i class="fas fa-newspaper"></i> IEEE Spectrum</a>
134 | 168 | </tr>
135 | 169 | </table>
136 | 170 |
137 | | - <br> |
138 | | - <br> |
139 | | - |
140 | | - <table width="880" border="0" align="center" cellspacing="0" cellpadding="0"> |
141 | | - <tr> |
142 | | - <td width="100%" valign="middle"> |
143 | | - <heading>2024</heading> |
144 | | - </td> |
145 | | - </tr> |
146 | | - </table> |
| 171 | + <br> |
147 | 172 |
148 | 173 | <table width="880" border="0" align="center" cellspacing="0" cellpadding="0">
149 | 174 | <tr>